
llama-fit-params: keep explicit --ctx-size 0 (#19070)

Johannes Gäßler, 5 days ago
commit e9fd8dcab4
4 changed files with 12 additions and 3 deletions
  1. common/arg.cpp (+4, -0)
  2. include/llama.h (+1, -0)
  3. src/llama.cpp (+6, -2)
  4. tools/fit-params/fit-params.cpp (+1, -1)

+ 4 - 0
common/arg.cpp

@@ -1231,6 +1231,10 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         string_format("size of the prompt context (default: %d, 0 = loaded from model)", params.n_ctx),
         [](common_params & params, int value) {
             params.n_ctx = value;
+            if (value == 0) {
+                // disable context reduction in llama_params_fit if the user explicitly requests the full context size:
+                params.fit_params_min_ctx = UINT32_MAX;
+            }
         }
     ).set_env("LLAMA_ARG_CTX_SIZE"));
     add_opt(common_arg(
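
A minimal, self-contained sketch of the sentinel pattern this hunk introduces: an explicit --ctx-size 0 is recorded by raising a fit_params_min_ctx-style field to UINT32_MAX, so a later fitting step knows it must not shrink the context. The struct and function names below are illustrative stand-ins, not the real common_params API.

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for the two common_params fields touched by this hunk (assumed layout).
    struct cli_params {
        int32_t  n_ctx              = 4096; // prompt context size, 0 = load the full size from the model
        uint32_t fit_params_min_ctx = 0;    // lower bound that the fitting step may shrink the context to
    };

    // Mirrors the logic added to the --ctx-size handler: an explicit 0 means "full model context",
    // so the sentinel UINT32_MAX disables any later context reduction.
    static void parse_ctx_size(cli_params & params, int value) {
        params.n_ctx = value;
        if (value == 0) {
            params.fit_params_min_ctx = UINT32_MAX;
        }
    }

    int main() {
        cli_params params;
        parse_ctx_size(params, 0);
        std::printf("n_ctx=%" PRIi32 " fit_params_min_ctx=%" PRIu32 "\n", params.n_ctx, params.fit_params_min_ctx);
        return 0;
    }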

+ 1 - 0
include/llama.h

@@ -489,6 +489,7 @@ extern "C" {
     //   - returns true if the parameters could be successfully modified to fit device memory
     //   - this function is NOT thread safe because it modifies the global llama logger state
     //   - only parameters that have the same value as in llama_default_model_params are modified
+    //     with the exception of the context size which is modified if and only if equal to 0
     LLAMA_API enum llama_params_fit_status llama_params_fit(
                                    const char   * path_model,
                     struct llama_model_params   * mparams,
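
A hedged usage sketch of the contract the amended comment describes: fields left at their default values are eligible for fitting, and n_ctx in particular is only touched when it equals 0. The call to llama_params_fit itself is left as a comment because its full parameter list is not shown in this hunk.

    #include "llama.h"

    int main() {
        struct llama_model_params   mparams = llama_model_default_params();
        struct llama_context_params cparams = llama_context_default_params();

        // Setting n_ctx to 0 opts in to fitting: llama_params_fit may pick a smaller
        // context size that fits device memory.
        cparams.n_ctx = 0;

        // Any explicit nonzero value is treated as a user decision and is never modified:
        // cparams.n_ctx = 8192;

        // enum llama_params_fit_status status = llama_params_fit("model.gguf", &mparams, ...);
        //                                       remaining arguments omitted; not shown in this hunk

        (void) mparams;
        (void) cparams;
        return 0;
    }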

+ 6 - 2
src/llama.cpp

@@ -311,8 +311,12 @@ static void llama_params_fit_impl(
                             __func__, hp_nct, cparams->n_ctx, memory_reduction/MiB);
                     }
                 } else {
-                    LLAMA_LOG_INFO("%s: default model context size is %" PRIu32 " which is <= the min. context size of %" PRIu32 " -> no change\n",
-                        __func__, hp_nct, n_ctx_min);
+                    if (n_ctx_min == UINT32_MAX) {
+                        LLAMA_LOG_INFO("%s: user has requested full context size of %" PRIu32 " -> no change\n", __func__, hp_nct);
+                    } else {
+                        LLAMA_LOG_INFO("%s: default model context size is %" PRIu32 " which is <= the min. context size of %" PRIu32 " -> no change\n",
+                            __func__, hp_nct, n_ctx_min);
+                    }
                 }
             } else {
                 LLAMA_LOG_INFO("%s: context size set by user to %" PRIu32 " -> no change\n", __func__, cparams->n_ctx);

+ 1 - 1
tools/fit-params/fit-params.cpp

@@ -36,7 +36,7 @@ int main(int argc, char ** argv) {
 
     LOG_INF("%s: printing fitted CLI arguments to stdout...\n", __func__);
     common_log_flush(common_log_main());
-    printf("-c %" PRIu32 " -ngl %" PRIu32, cparams.n_ctx, mparams.n_gpu_layers);
+    printf("-c %" PRIu32 " -ngl %" PRIi32, cparams.n_ctx, mparams.n_gpu_layers);
 
     size_t nd = llama_max_devices();
     while (nd > 1 && mparams.tensor_split[nd - 1] == 0.0f) {
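
The one-character change swaps the format macro because n_gpu_layers is a signed 32-bit field: printed through PRIu32, a negative value would come out as a huge unsigned number. A standalone illustration of the corrected pattern (the values are made up):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t n_ctx        = 4096; // unsigned -> PRIu32
        const int32_t  n_gpu_layers = -1;   // signed   -> PRIi32; PRIu32 would print 4294967295

        // Same format-string shape as the corrected line in fit-params.cpp:
        std::printf("-c %" PRIu32 " -ngl %" PRIi32 "\n", n_ctx, n_gpu_layers);
        return 0;
    }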