embedding.cpp
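
This example program from llama.cpp loads a model with embedding mode enabled, tokenizes the prompt (parsed by gpt_params_parse; typically -m selects the model file and -p supplies the prompt text), evaluates the prompt tokens in a single pass, and prints the resulting embedding vector to stdout, one float per dimension.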

#include "common.h"
#include "llama.h"

#include <ctime>
#include <random>  // std::mt19937 is used directly below (may also arrive via common.h)
#include <thread>  // std::thread::hardware_concurrency is used directly below
int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // this example always runs in embedding mode
    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }
    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx      = params.n_ctx;
        lparams.n_parts    = params.n_parts;
        lparams.seed       = params.seed;
        lparams.f16_kv     = params.memory_f16;
        lparams.logits_all = params.perplexity;
        lparams.use_mmap   = params.use_mmap;
        lparams.use_mlock  = params.use_mlock;
        lparams.embedding  = params.embedding;

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }
    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }
    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    // determine newline token (not actually used in this example)
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\n");
    }
    if (params.embedding) {
        // evaluate the prompt; with lparams.embedding enabled this populates
        // the embedding vector that llama_get_embeddings() returns below
        if (embd_inp.size() > 0) {
            if (llama_eval(ctx, embd_inp.data(), (int) embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }

        // print the embedding to stdout, one float per dimension
        const int n_embd = llama_n_embd(ctx);
        const auto embeddings = llama_get_embeddings(ctx);

        for (int i = 0; i < n_embd; i++) {
            printf("%f ", embeddings[i]);
        }
        printf("\n");
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}
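
A natural follow-up to printing raw embeddings is comparing two of them. The sketch below is not part of embedding.cpp: it is a self-contained, illustrative program (the two vectors are made-up placeholders standing in for captured output) that computes the cosine similarity dot(a, b) / (|a| * |b|) between two equal-length embedding vectors.

#include <cmath>
#include <cstdio>
#include <vector>

// Cosine similarity between two equal-length vectors: dot(a, b) / (|a| * |b|).
// Returns 0 when either vector has zero norm. Assumes a.size() == b.size().
static float cosine_similarity(const std::vector<float> & a, const std::vector<float> & b) {
    double dot = 0.0, norm_a = 0.0, norm_b = 0.0;
    for (size_t i = 0; i < a.size(); i++) {
        dot    += a[i] * b[i];
        norm_a += a[i] * a[i];
        norm_b += b[i] * b[i];
    }
    if (norm_a == 0.0 || norm_b == 0.0) {
        return 0.0f;
    }
    return (float) (dot / (std::sqrt(norm_a) * std::sqrt(norm_b)));
}

int main() {
    // Placeholder 4-dimensional vectors; a real run would use the
    // n_embd floats printed by the embedding example for two prompts.
    std::vector<float> a = { 0.10f, 0.30f, -0.20f, 0.90f };
    std::vector<float> b = { 0.12f, 0.25f, -0.10f, 0.80f };

    printf("cosine similarity = %f\n", cosine_similarity(a, b));
    return 0;
}

Values near 1 mean the two prompts point in nearly the same direction in embedding space; values near 0 mean they are roughly orthogonal.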