@@ -311,8 +311,12 @@ static void llama_params_fit_impl(
                     __func__, hp_nct, cparams->n_ctx, memory_reduction/MiB);
             }
         } else {
-            LLAMA_LOG_INFO("%s: default model context size is %" PRIu32 " which is <= the min. context size of %" PRIu32 " -> no change\n",
-                __func__, hp_nct, n_ctx_min);
+            if (n_ctx_min == UINT32_MAX) {
+                LLAMA_LOG_INFO("%s: user has requested full context size of %" PRIu32 " -> no change\n", __func__, hp_nct);
+            } else {
+                LLAMA_LOG_INFO("%s: default model context size is %" PRIu32 " which is <= the min. context size of %" PRIu32 " -> no change\n",
+                    __func__, hp_nct, n_ctx_min);
+            }
         }
     } else {
         LLAMA_LOG_INFO("%s: context size set by user to %" PRIu32 " -> no change\n", __func__, cparams->n_ctx);
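
For reference, a minimal standalone sketch of the new branching (not the llama.cpp implementation; the helper name log_ctx_no_change and the plain printf calls are illustrative assumptions): n_ctx_min == UINT32_MAX is treated as a sentinel meaning "the user requested the full model context", which now selects its own log message, while a default context at or below a real minimum keeps the existing message.

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Sketch only: mirrors the logging decision made in the diff above.
static void log_ctx_no_change(uint32_t hp_nct, uint32_t n_ctx_min) {
    if (n_ctx_min == UINT32_MAX) {
        // sentinel: the user explicitly requested the full model context -> never shrink it
        printf("user has requested full context size of %" PRIu32 " -> no change\n", hp_nct);
    } else if (hp_nct <= n_ctx_min) {
        // the model's default context is already at or below the minimum -> nothing to reduce
        printf("default model context size is %" PRIu32 " which is <= the min. context size of %" PRIu32 " -> no change\n",
               hp_nct, n_ctx_min);
    }
}

int main() {
    log_ctx_no_change(8192, UINT32_MAX); // user asked for the full context
    log_ctx_no_change(2048, 4096);       // default context already <= the minimum
    return 0;
}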