@@ -414,6 +414,8 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     lparams.f16_kv     = params.memory_f16;
     lparams.use_mmap   = params.use_mmap;
     lparams.use_mlock  = params.use_mlock;
+    lparams.logits_all = params.perplexity;
+    lparams.embedding  = params.embedding;
 
     llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams);
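
For reference, a minimal sketch (not part of the diff) of what the two new flags enable after initialization, assuming the llama.h API of this period (llama_get_logits, llama_get_embeddings, llama_n_vocab, llama_n_embd):

    // Sketch only: how the two new context flags are consumed downstream.
    #include "llama.h"

    static void inspect_outputs(struct llama_context * lctx) {
        // With lparams.logits_all = true (perplexity mode), evaluation keeps
        // logits for every token in the batch, so llama_get_logits() points
        // at n_tokens * n_vocab floats rather than just the last row.
        const float * logits  = llama_get_logits(lctx);
        const int     n_vocab = llama_n_vocab(lctx);
        (void) logits; (void) n_vocab;

        // With lparams.embedding = true, the embedding vector is exposed
        // after evaluation via llama_get_embeddings(), of size llama_n_embd().
        const float * embd   = llama_get_embeddings(lctx);
        const int     n_embd = llama_n_embd(lctx);
        (void) embd; (void) n_embd;
    }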