ppl : fix n_seq_max for perplexity (#8277)

* ppl : fix n_seq_max for perplexity

* use 1 seq for kl_divergence
slaren 1 year ago
commit 5f2d4e60e2
1 changed file with 6 additions and 3 deletions:
    examples/perplexity/perplexity.cpp

examples/perplexity/perplexity.cpp (+6, -3)

@@ -1991,6 +1991,12 @@ int main(int argc, char ** argv) {
         params.n_batch = std::min(params.n_batch, n_kv);
     } else {
         params.n_batch = std::min(params.n_batch, params.n_ctx);
+        if (params.kl_divergence) {
+            params.n_parallel = 1;
+        } else {
+            // ensure there's at least enough seq_ids for HellaSwag
+            params.n_parallel = std::max(4, params.n_parallel);
+        }
     }
 
     if (params.ppl_stride > 0) {
@@ -2015,9 +2021,6 @@ int main(int argc, char ** argv) {
     llama_model * model;
     llama_context * ctx;
 
-    // ensure there's at least enough seq_ids for HellaSwag
-    params.n_parallel = std::max(4, params.n_parallel);
-
     // load the model and apply lora adapter, if any
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
     if (model == NULL) {
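
For context, a minimal standalone sketch of the sequence-count logic introduced by this patch is shown below. The helper function is hypothetical (it is not part of the change); it only restates the branch added above, under the assumption implied by the commit title that params.n_parallel is what ends up as the context's n_seq_max.

#include <algorithm>

// Hypothetical helper mirroring the patched logic: KL divergence decodes a
// single sequence, while HellaSwag scores several endings per task and needs
// at least 4 sequence ids.
static int pick_n_parallel(bool kl_divergence, int n_parallel) {
    if (kl_divergence) {
        return 1;                       // one sequence is enough for KL divergence
    }
    return std::max(4, n_parallel);     // keep at least 4 seq ids for HellaSwag
}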