embedding.cpp

#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <ctime>
#include <random>   // std::mt19937 (also pulled in via common.h in-tree)
#include <thread>   // std::thread::hardware_concurrency
#include <tuple>    // std::tie

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
int main(int argc, char ** argv) {
    gpt_params params;

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // this example always runs in embedding mode
    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                " expect poor results\n", __func__, params.n_ctx);
    }
  18. fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
  19. if (params.seed == LLAMA_DEFAULT_SEED) {
  20. params.seed = time(NULL);
  21. }
  22. fprintf(stderr, "%s: seed = %u\n", __func__, params.seed);
  23. std::mt19937 rng(params.seed);
  24. if (params.random_prompt) {
  25. params.prompt = gpt_random_prompt(rng);
  26. }

    // initialize the llama.cpp backend (optionally with NUMA optimizations)
    llama_backend_init(params.numa);

    llama_model * model;
    llama_context * ctx;

    // load the model and create an inference context
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    int n_past = 0; // no tokens have been evaluated yet

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt (true = prepend the BOS token)
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
        }
        fprintf(stderr, "\n");
    }

    if (params.embedding) {
        // evaluate the whole prompt in a single call
        if (embd_inp.size() > 0) {
            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }

        // print the resulting embedding vector (n_embd floats) to stdout
        const int n_embd = llama_n_embd(ctx);
        const auto embeddings = llama_get_embeddings(ctx);

        for (int i = 0; i < n_embd; i++) {
            printf("%f ", embeddings[i]);
        }
        printf("\n");
    }

    llama_print_timings(ctx);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}
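
The vector this example prints is typically consumed by downstream code, most commonly for semantic similarity between two texts. Below is a minimal standalone sketch (not part of llama.cpp) that compares two such embedding vectors with cosine similarity; the `cosine_similarity` helper and the hard-coded vectors are illustrative assumptions, and in practice the inputs would be parsed from two runs of this program (e.g. `./embedding -m <model> -p "<text>"`).

#include <cmath>
#include <cstdio>
#include <vector>

// cosine similarity between two embedding vectors of equal length
static double cosine_similarity(const std::vector<float> & a, const std::vector<float> & b) {
    double dot = 0.0, norm_a = 0.0, norm_b = 0.0;
    for (size_t i = 0; i < a.size(); i++) {
        dot    += (double) a[i] * b[i];
        norm_a += (double) a[i] * a[i];
        norm_b += (double) b[i] * b[i];
    }
    if (norm_a == 0.0 || norm_b == 0.0) {
        return 0.0; // define similarity with an all-zero vector as 0
    }
    return dot / (std::sqrt(norm_a) * std::sqrt(norm_b));
}

int main() {
    // stand-in vectors; in practice, parse two outputs of ./embedding
    const std::vector<float> a = { 0.10f, 0.25f, -0.40f };
    const std::vector<float> b = { 0.12f, 0.20f, -0.35f };

    printf("cosine similarity = %f\n", cosine_similarity(a, b));
    return 0;
}

Accumulating in double keeps the dot product and norms numerically stable for the vector sizes this example produces (n_embd is 4096 for 7B-class LLaMA models, for instance).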