embedding.cpp

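// Minimal llama.cpp example: load a model in embedding mode, evaluate a prompt,
// and print the resulting embedding vector to stdout.
//
// Typical invocation (model path is illustrative, not part of this file):
//   ./embedding -m models/7B/ggml-model-q4_0.bin -p "some text to embed"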
#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <cmath>   // std::sqrt (used by the similarity sketch at the bottom)
#include <ctime>
#include <random>  // std::mt19937
#include <thread>  // std::thread::hardware_concurrency

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

int main(int argc, char ** argv) {
    gpt_params params;

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    // run in embedding mode so that llama_eval stores the prompt embeddings
    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    // seed the RNG from the clock unless a seed was given on the command line
    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_init_backend();

    llama_context * ctx;

    // load the model
    ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);
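    // (the third argument requests a leading BOS token)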

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\n");
    }

    if (params.embedding) {
        // evaluate the whole prompt in one llama_eval call; n_past = 0 since
        // nothing has been evaluated yet
        if (embd_inp.size() > 0) {
            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }

        // print the resulting embedding: n_embd floats on a single line
        const int n_embd = llama_n_embd(ctx);
        const auto embeddings = llama_get_embeddings(ctx);

        for (int i = 0; i < n_embd; i++) {
            printf("%f ", embeddings[i]);
        }
        printf("\n");
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}
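
// A minimal sketch of how the printed vector is typically consumed downstream:
// cosine similarity between two embeddings of the same dimension n. This helper
// is not part of the original example; the name embd_similarity_cos is ours.
float embd_similarity_cos(const float * a, const float * b, int n) {
    float dot = 0.0f, norm_a = 0.0f, norm_b = 0.0f;
    for (int i = 0; i < n; i++) {
        dot    += a[i] * b[i];
        norm_a += a[i] * a[i];
        norm_b += b[i] * b[i];
    }
    // guard against zero vectors to avoid division by zero
    if (norm_a == 0.0f || norm_b == 0.0f) {
        return 0.0f;
    }
    return dot / (std::sqrt(norm_a) * std::sqrt(norm_b));
}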