embedding.cpp

#include "common.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <random>
#include <string>
#include <thread>
#include <vector>
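
// Example invocation — a sketch only: the binary name and model path are
// illustrative, but -m (model) and -p (prompt) are the standard flags
// handled by gpt_params_parse() in common.h:
//
//   ./embedding -m models/llama-7B/ggml-model.bin -p "Hello, embeddings"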

int main(int argc, char ** argv) {
    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
                " expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx      = params.n_ctx;
        lparams.n_parts    = params.n_parts;
        lparams.seed       = params.seed;
        lparams.f16_kv     = params.memory_f16;
        lparams.logits_all = params.perplexity;
        lparams.use_mlock  = params.use_mlock;
        lparams.embedding  = params.embedding;

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    // determine newline token (not used further in this example)
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\n");
    }

    if (params.embedding) {
        if (embd_inp.size() > 0) {
            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }

        const auto embeddings = llama_get_embeddings(ctx);

        // TODO: print / use the embeddings
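
        // A minimal sketch of one way to act on the TODO above (not part of
        // the original): print the embedding vector for the evaluated prompt,
        // assuming llama_n_embd() from llama.h reports its dimensionality.
        if (embeddings != NULL) {
            const int n_embd = llama_n_embd(ctx);
            for (int i = 0; i < n_embd; i++) {
                printf("%f ", embeddings[i]);
            }
            printf("\n");
        }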
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}