@@ -266,8 +266,10 @@ static llama_tokens format_infill(
     }

     // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
-    const int n_suffix_take = std::min<int>(tokens_suffix.size(), (n_batch/4));
-    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4) - 3);
+    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
+    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));
+
+    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

     // fill the rest of the context with extra chunks
     const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());
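
The new split can be sanity-checked with concrete numbers. Below is a minimal standalone sketch of the same arithmetic, assuming n_batch = 2048 and a 16-token tokens_prompt (both values hypothetical, chosen only for illustration; the 2 in the suffix budget presumably reserves room for two FIM sentinel tokens):

    #include <algorithm>
    #include <cstdio>

    int main() {
        const int n_batch  = 2048; // hypothetical batch size
        const int n_prompt = 16;   // stands in for tokens_prompt.size()

        // prefix keeps the full 3/4 share of the batch
        const int n_prefix_take = 3*(n_batch/4);                             // 1536

        // suffix gets whatever remains of the 1/4 share after the prompt
        // itself and the 2 reserved tokens are subtracted
        const int n_suffix_take = std::max(0, (n_batch/4) - (2 + n_prompt)); // 494

        std::printf("prefix = %d, suffix = %d, total = %d\n",
                n_prefix_take, n_suffix_take, n_prefix_take + n_suffix_take);
        return 0;
    }

With these numbers, prefix (1536) + suffix (494) + prompt (16) + 2 reserved tokens comes to exactly 2048, so the FIM input fits in a single batch. Under the old split the suffix budget ignored tokens_prompt, so the combined input could exceed n_batch, which appears to be the motivation for this change.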