
embedding.cpp

#include "common.h"
#include "llama.h"
#include "build-info.h"

#include <ctime>
#include <random> // std::mt19937 (may also come in via common.h)
#include <thread> // std::thread::hardware_concurrency

int main(int argc, char ** argv) {
    gpt_params params;

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified); "
                "expect poor results\n", __func__, params.n_ctx);
    }

    fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_context * ctx;

    // load the model
    ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\n");
    }

    if (params.embedding) {
        // evaluate the prompt so the model computes its embedding
        if (embd_inp.size() > 0) {
            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }

        // print the embedding vector, one float per dimension, space-separated on stdout
        const int n_embd = llama_n_embd(ctx);
        const auto embeddings = llama_get_embeddings(ctx);

        for (int i = 0; i < n_embd; i++) {
            printf("%f ", embeddings[i]);
        }
        printf("\n");
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}
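
The program above prints the raw embedding vector to stdout. As a minimal sketch of how such output might be consumed downstream (not part of the original example; the cosine_similarity helper and the toy vectors below are hypothetical stand-ins for two captured llama_get_embeddings() outputs), one common use is comparing two embeddings by cosine similarity:

#include <cmath>
#include <cstdio>
#include <vector>

// Cosine similarity between two embedding vectors of equal length:
// dot(a, b) / (|a| * |b|), in [-1, 1] for non-zero vectors.
static float cosine_similarity(const std::vector<float> & a, const std::vector<float> & b) {
    float dot = 0.0f, norm_a = 0.0f, norm_b = 0.0f;
    for (size_t i = 0; i < a.size(); i++) {
        dot    += a[i] * b[i];
        norm_a += a[i] * a[i];
        norm_b += b[i] * b[i];
    }
    return dot / (std::sqrt(norm_a) * std::sqrt(norm_b));
}

int main() {
    // Toy 3-dimensional vectors; a real n_embd is model-dependent (e.g. 4096).
    std::vector<float> a = {0.1f, 0.8f, -0.3f};
    std::vector<float> b = {0.2f, 0.7f, -0.1f};

    printf("similarity = %f\n", cosine_similarity(a, b));
    return 0;
}

Scores close to 1.0 indicate semantically similar prompts; this is the usual way embedding output from an example like this is turned into a search or clustering signal.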