#include "arg.h"
#include "common.h"
#include "llama.h"
#include "ggml.h"
#include "pca.hpp"
#include "mean.hpp"

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#include <algorithm>
#include <climits>
#include <cmath>   // std::fabs, used in filter_nonzero_rows
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <sstream> // std::stringstream, used in to_string
#include <string>
#include <tuple>
#include <vector>
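
//////////////////////////////////////////////////
// overview
//
// high-level flow of this example:
//  1. tokenize each positive/negative prompt pair, padding both to the same length
//  2. evaluate each prompt; cb_eval captures every layer's "l_out" activations
//  3. per layer, compute diff = pos - neg, drop all-zero rows, then concatenate
//     the diffs across all prompt pairs (train_context)
//  4. reduce each layer's diff matrix to a single direction, using PCA or the mean
//  5. export the per-layer directions as a "controlvector" GGUF file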

//////////////////////////////////////////////////
// utils

template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }
    return ret;
}

static void print_usage(int, char ** argv) {
    printf("\nexample usage:\n");
    printf("\n    CPU only:   %s -m ./llama-3.Q4_K_M.gguf\n", argv[0]);
    printf("\n    with GPU:   %s -m ./llama-3.Q4_K_M.gguf -ngl 99\n", argv[0]);
    printf("\n    advanced:   %s -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100\n", argv[0]);
    printf("\n    using mean: %s -m ./llama-3.Q4_K_M.gguf --method mean\n", argv[0]);
    printf("\n");
}

//////////////////////////////////////////////////

// cb_eval is reused for each pair of positive - negative prompts
struct callback_data {
    ggml_context * ctx_ggml = nullptr; // holds v_pos, v_neg, v_diff_filtered

    int n_layers = 0;
    int n_tokens = 0;
    bool is_eval_pos = true;

    // each element of the vector corresponds to one layer
    std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
    std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
    std::vector<struct ggml_tensor *> v_diff_filtered; // vector of matrices of size [n_embd, n_nonzero_rows]. NOTE: n_nonzero_rows may be different for each layer

    // save a tensor into either v_pos or v_neg (decided by is_eval_pos)
    void save_tensor_for_layer(struct ggml_tensor * t) {
        GGML_ASSERT(t->type == GGML_TYPE_F32);

        if (ctx_ggml == nullptr) {
            // alloc a new ctx_ggml if needed
            struct ggml_init_params params_ggml = {
                /*.mem_size   =*/ ggml_tensor_overhead() * n_layers * 3u,
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };
            ctx_ggml = ggml_init(params_ggml);
        }

        // copy tensor data
        auto n_bytes = ggml_nbytes(t);
        struct ggml_tensor * t_layer = ggml_new_tensor_2d(ctx_ggml, t->type, t->ne[0], t->ne[1]);
        t_layer->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
        ggml_backend_tensor_get(t, t_layer->data, 0, n_bytes);
        ggml_set_name(t_layer, ggml_get_name(t));
        //print_debug_tensor(t_layer);

        if (is_eval_pos) {
            v_pos.push_back(t_layer);
        } else {
            v_neg.push_back(t_layer);
        }
    }

    // calculate diff (v_pos - v_neg) and place the result back into v_pos
    // all-zero rows in the diff tensor will also be removed
    // NOTE: the final layer is ignored. we only have (n_layers - 1) layers to process
    std::vector<struct ggml_tensor *> calc_diff() {
        for (size_t il = 0; il < v_pos.size(); il++) {
            float * a = (float *) v_pos[il]->data;
            float * b = (float *) v_neg[il]->data;
            size_t n_elem = ggml_nelements(v_pos[il]);
            for (size_t j = 0; j < n_elem; j++) {
                a[j] -= b[j];
            }
            //print_debug_tensor(v_pos[il]);
            auto diff_filtered = filter_nonzero_rows(v_pos[il]);
            v_diff_filtered.push_back(diff_filtered);
        }
        return v_diff_filtered; // for convenience, we return the resulting std::vector
    }

    // delete zero rows from a given 2D tensor
    struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
        //printf("filter_nonzero_rows\n");
        auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
            // check if the given row contains only (near-)zero elements
            int n_cols = t->ne[0]; // hint: should be equal to n_embd
            for (int col = 0; col < n_cols; ++col) {
                if (std::fabs(ggml_get_f32_nd(t, col, row, 0, 0)) > eps) {
                    return false;
                }
            }
            return true;
        };
        std::vector<int> rows_to_copy; // the indices of non-zero rows (to be copied into diff_filtered)
        for (int i_row = 0; i_row < a->ne[1]; i_row++) {
            if (!is_row_all_zeros(a, i_row, 1e-6)) {
                rows_to_copy.push_back(i_row);
            }
        }

        // get "n_nonzero_rows" for the output "diff_filtered"
        int n_nonzero_rows = rows_to_copy.size();
        //printf("n_nonzero_rows: %d\n", n_nonzero_rows);
        int n_embd = a->ne[0];
        GGML_ASSERT(n_nonzero_rows > 0);

        // diff_filtered: [n_embd, n_nonzero_rows]
        struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
            ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
        ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
        diff_filtered->data = malloc(ggml_nbytes(diff_filtered));

        // copy non-zero rows
        for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
            int src_row = rows_to_copy[dest_row];
            for (int i = 0; i < n_embd; i++) {
                float src_elem = ggml_get_f32_nd(a, i, src_row, 0, 0);
                ggml_set_f32_nd(diff_filtered, i, dest_row, 0, 0, src_elem);
            }
        }

        //print_debug_tensor(diff_filtered);

        return diff_filtered;
    }

    // we don't implement a destructor, because we want to reuse callback_data. we just want to free the tensors
    void reset() {
        for (auto ptr : v_pos) free(ptr->data);
        for (auto ptr : v_neg) free(ptr->data);
        for (auto ptr : v_diff_filtered) free(ptr->data);
        v_pos.clear();
        v_neg.clear();
        v_diff_filtered.clear();
        if (ctx_ggml) {
            ggml_free(ctx_ggml);
        }
        ctx_ggml = nullptr;
    }
};
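
// illustrative example (values made up) of what calc_diff() + filter_nonzero_rows()
// do for one layer, with n_embd = 2 and n_tokens = 3:
//
//   v_pos[il]:       v_neg[il]:       diff (written back into v_pos[il]):
//   [[0.5, 1.0],     [[0.5, 1.0],     [[0.0, 0.0],  <- zero row, removed
//    [0.2, 0.4],      [0.1, 0.3],      [0.1, 0.1],
//    [0.7, 0.9]]      [0.7, 0.9]]      [0.0, 0.0]]  <- zero row, removed
//
// rows where the positive and negative activations coincide (e.g. a shared prompt
// prefix) cancel to zero and are filtered out, leaving v_diff_filtered[il] with
// shape [n_embd, n_nonzero_rows] = [2, 1] in this example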

/**
 * train_context is used to store the ggml context for pre- and post-processing the diff vectors
 * in short, input => v_diff and output => v_final
 */
struct train_context {
    ggml_context * ctx_ggml;
    int n_embd;
    int n_layers;

    /* pairs of prompts to be used for generating the final vector */
    std::vector<std::string> positive_entries;
    std::vector<std::string> negative_entries;

    // each element of the vector corresponds to one layer
    // NOTE: the last layer is discarded. therefore, we will have (n_layers - 1) elements here
    // NOTE (2): v_diff is transposed from v_diff_tmp
    std::vector<struct ggml_tensor *> v_diff;  // vector of matrices of size [m, n_embd] where m ~ n_tokens * n_completions (v_diff contains no zero-rows)
    std::vector<struct ggml_tensor *> v_final; // vector of vectors of size [n_embd] to be written to file

    // to easily re-alloc when concatenating v_diff, we temporarily store v_diff in a vector instead of a tensor
    // v_diff_tmp will get converted into v_diff later on
    std::vector<std::vector<uint8_t>> v_diff_tmp;

    train_context(int n_embd_, int n_layers_) {
        n_embd = n_embd_;
        n_layers = n_layers_;
        struct ggml_init_params params_ggml = {
            /*.mem_size   =*/ ggml_tensor_overhead() * (n_layers - 1) * 2u,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ true,
        };
        ctx_ggml = ggml_init(params_ggml);
        for (int il = 0; il < n_layers - 1; il++) {
            std::vector<uint8_t> empty;
            v_diff_tmp.push_back(empty);
            auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
            t->data = malloc(ggml_nbytes(t)); // TODO: get rid of this malloc if possible
            v_final.push_back(t);
        }
    }

    // add new rows into the existing tensor in v_diff_tmp
    void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
        GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
        for (int il = 0; il < n_layers - 1; il++) {
            auto t = diff_filtered[il];
            auto & diff_tmp = v_diff_tmp[il];
            size_t curr_size = diff_tmp.size();
            diff_tmp.resize(curr_size + ggml_nbytes(t));
            memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
        }
    }

    // build the v_diff tensors from v_diff_tmp (v_diff needs to be transposed)
    // TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
    void build_v_diff(bool transpose) {
        printf("build_v_diff\n");
        for (int il = 0; il < n_layers - 1; il++) {
            auto & diff_tmp = v_diff_tmp[il];
            int n_elem = diff_tmp.size() / sizeof(float);
            GGML_ASSERT(n_elem % n_embd == 0);
            int n_rows = n_elem / n_embd;
            struct ggml_tensor * diff = transpose
                ? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
                : ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
            ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
            diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
            if (transpose) {
                // copy data & transpose
                float * arr = (float *) diff_tmp.data();
                for (int ir = 0; ir < n_rows; ++ir) {
                    for (int ic = 0; ic < n_embd; ++ic) {
                        float f = arr[ir*n_embd + ic];
                        ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
                    }
                }
            } else {
                // only copy
                memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
            }
            v_diff.push_back(diff);
            print_debug_tensor(diff);
            // free the memory of diff_tmp
            diff_tmp.resize(0);
        }
    }

    ~train_context() {
        for (auto ptr : v_final) free(ptr->data);
        for (auto ptr : v_diff)  free(ptr->data);
        // no need to free v_diff_tmp, since we didn't use malloc for it
        ggml_free(ctx_ggml);
    }
};
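
// memory layout note for build_v_diff() (derived from the code above):
// - v_diff_tmp[il] is a flat byte buffer of concatenated rows, each row being n_embd floats
// - with transpose == false (mean method), v_diff[il] keeps that layout: [n_embd, n_rows]
// - with transpose == true (PCA), element (ir, ic) of the buffer is written to position
//   (ir, ic) of a [n_rows, n_embd] tensor, so each ggml row then holds one embedding
//   dimension sampled across all n_rows diff rows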

struct tokenized_prompt {
    std::vector<llama_token> tokens_pos;
    std::vector<llama_token> tokens_neg;
    size_t max_seq_len;

    tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
        const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
        tokens_pos = common_tokenize(ctx, pos, add_bos, true);
        tokens_neg = common_tokenize(ctx, neg, add_bos, true);
        max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
        padding_seq(ctx, tokens_pos, max_seq_len);
        padding_seq(ctx, tokens_neg, max_seq_len);
    }

    void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
        // TODO: customize padding token
        std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
        llama_token pad_tok = pad_tokens.back();
        while (tokens.size() < len) {
            tokens.push_back(pad_tok);
        }
    }
};
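
// why pad: calc_diff() subtracts v_neg from v_pos elementwise, so both prompts of a
// pair must produce activation matrices of the same shape [n_embd, max_seq_len].
// usage sketch (illustrative prompts, not part of the program):
//
//   tokenized_prompt t(ctx, "I feel great", "I feel terrible");
//   // after padding: t.tokens_pos.size() == t.tokens_neg.size() == t.max_seq_len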

//////////////////////////////////////////////////

template <typename T>
static std::string to_string(const T & val) {
    std::stringstream ss;
    ss << val;
    return ss.str();
}

static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
    std::vector<std::string> output;
    std::ifstream file(path);
    if (!file.is_open()) {
        fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
        exit(1);
    }
    std::string line;
    while (std::getline(file, line)) {
        bool is_skip = skip_empty_lines && line.empty();
        if (!is_skip) {
            string_process_escapes(line);
            output.push_back(line);
        }
    }
    file.close();
    return output;
}
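
// the prompt files are plain text: one entry per line, empty lines skipped, and escape
// sequences such as "\n" expanded by string_process_escapes(). illustrative contents
// (made-up example entries):
//
//   positive.txt:   Act like a person who is extremely happy.
//   negative.txt:   Act like a person who is extremely sad.
//
// both files must contain the same number of entries (checked in prepare_entries() below)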

//////////////////////////////////////////////////

static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
    auto * cb_data = (callback_data *) user_data;
    static const char * l_out_name = "l_out";
    const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;

    if (ask) {
        return is_l_out;
    }

    if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
        return true;
    }

    // save the tensor to the current context
    cb_data->save_tensor_for_layer(t);
    return true;
}
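
// note: the scheduler invokes cb_eval twice per tensor: first with ask == true, to check
// whether this tensor's data should be observed at all, then with ask == false once the
// data is available. only "l_out*" tensors whose second dimension matches n_tokens are
// captured; this skips graph nodes that reuse the name with a different shape.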

static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
    llama_kv_cache_clear(ctx);
    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
        fprintf(stderr, "%s : failed to eval\n", __func__);
        return false;
    }
    return true;
}

static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
    struct gguf_context * ctx = gguf_init_empty();

    const std::string arch = "controlvector";
    gguf_set_val_str(ctx, "general.architecture", arch.c_str());
    gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
    gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());

    for (size_t i = 0; i < v_ctrl.size(); ++i) {
        gguf_add_tensor(ctx, v_ctrl[i]);
        print_debug_tensor(v_ctrl[i]);
        printf("Added tensor: %s\n", v_ctrl[i]->name);
    }

    printf("%s: writing file...\n", __func__);
    gguf_write_to_file(ctx, fname.c_str(), false);
    printf("%s: wrote file '%s'\n", __func__, fname.c_str());
    gguf_free(ctx);
}
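
// the exported file can then be applied at inference time. a sketch of the invocation,
// assuming llama.cpp's common --control-vector / --control-vector-scaled flags and the
// default output name control_vector.gguf:
//
//   ./llama-cli -m ./llama-3.Q4_K_M.gguf --control-vector-scaled ./control_vector.gguf 0.8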

/**
 * Load the positive and negative prompt files and store the entries in ctx_train.
 * The two files must contain the same number of (non-empty) lines.
 */
static int prepare_entries(common_params & params, train_context & ctx_train) {
    // load prompts
    std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
    std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
    if (positive_prompts.size() != negative_prompts.size()) {
        fprintf(stderr, "number of positive and negative prompts must be equal\n");
        return 1;
    }
    if (positive_prompts.empty()) {
        fprintf(stderr, "must provide at least one prompt pair\n");
        return 1;
    }
    ctx_train.positive_entries = positive_prompts;
    ctx_train.negative_entries = negative_prompts;
    return 0;
}

int main(int argc, char ** argv) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
        return 1;
    }

    if (params.n_pca_iterations % params.n_pca_batch != 0) {
        fprintf(stderr, "PCA iterations must be a multiple of the PCA batch size\n");
        return 1;
    }

    callback_data cb_data;

    // pass the callback to the backend scheduler
    // it will be executed for each node during the graph computation
    params.cb_eval = cb_eval;
    params.cb_eval_user_data = &cb_data;
    params.warmup = false;

    print_build_info();
    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model to get hparams
    common_init_result llama_init = common_init_from_params(params);

    llama_model * model = llama_init.model;
    llama_context * ctx = llama_init.context;

    // int n_ctx = llama_n_ctx(ctx);
    int n_layers = llama_n_layer(model);
    int n_embd = llama_n_embd(model);

    // get the model hint param (a.k.a. the model arch name)
    char model_hint[128];
    llama_model_meta_val_str(model, "general.architecture", model_hint, 128);

    // init train_context
    train_context ctx_train(n_embd, n_layers);

    // load and prepare entries for training
    prepare_entries(params, ctx_train);

    // we have to pretokenize everything because otherwise we don't know how much overhead to allocate for ctx_diffs_wrapped
    std::vector<tokenized_prompt> tokenized_prompts;
    size_t n_total_tokens = 0;
    for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
        tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
        n_total_tokens += 2 * t.max_seq_len;
        tokenized_prompts.push_back(std::move(t));
    }

    std::cout << "n_total_tokens: " << n_total_tokens << std::endl;

    for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
        bool success = false;
        tokenized_prompt t = tokenized_prompts[i];
        cb_data.n_layers = n_layers;
        cb_data.n_tokens = t.max_seq_len;
        printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
            (int) i+1, (int) ctx_train.positive_entries.size(),
            tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
            tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
            (int) t.max_seq_len);

        cb_data.is_eval_pos = true;
        success = get_hidden_layers(ctx, t.tokens_pos);
        if (!success) break;

        cb_data.is_eval_pos = false;
        success = get_hidden_layers(ctx, t.tokens_neg);
        if (!success) break;

        // calculate diff and remove all-zero rows
        auto v_diff_filtered = cb_data.calc_diff();

        // save & concat the filtered v_diff to ctx_train
        ctx_train.concat_diff_tmp(v_diff_filtered);

        // reset for the next iteration
        cb_data.reset();
    }

    // done with the model, we can now free it to regain some memory
    printf("Done evaluating prompts, unloading model...\n");
    llama_free(ctx);
    llama_free_model(model);

    bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;

    // prepare ctx_train for PCA
    ctx_train.build_v_diff(use_pca);

    if (use_pca) {
        // run PCA
        PCA::pca_params pca_params;
        pca_params.n_threads    = params.cpuparams.n_threads;
        pca_params.n_batch      = params.n_pca_batch;
        pca_params.n_iterations = params.n_pca_iterations;
        PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
    } else {
        // run mean
        mean::run(ctx_train.v_diff, ctx_train.v_final);
    }

    // write the output vectors to gguf
    export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint);

    llama_backend_free();

    return 0;
}