@@ -2006,7 +2006,7 @@ struct server_context {
             }
         }

-        if (!llama_kv_self_can_shift(ctx)) {
+        if (!llama_memory_can_shift(llama_get_memory(ctx))) {
             if (params_base.ctx_shift) {
                 params_base.ctx_shift = false;
                 SRV_WRN("%s\n", "ctx_shift is not supported by this context, it will be disabled");
@@ -2224,7 +2224,7 @@ struct server_context {
         SRV_DBG("%s", "clearing KV cache\n");

         // clear the entire KV cache
-        llama_kv_self_clear(ctx);
+        llama_memory_clear(llama_get_memory(ctx), true);
         clean_kv_cache = false;
     }

@@ -2910,7 +2910,7 @@ struct server_context {

                     // Erase token cache
                     const size_t n_erased = slot->cache_tokens.size();
-                    llama_kv_self_seq_rm(ctx, slot->id, -1, -1);
+                    llama_memory_seq_rm(llama_get_memory(ctx), slot->id, -1, -1);
                     slot->cache_tokens.clear();

                     auto res = std::make_unique<server_task_result_slot_erase>();
@@ -2985,8 +2985,8 @@ struct server_context {

                     SLT_WRN(slot, "slot context shift, n_keep = %d, n_left = %d, n_discard = %d\n", n_keep, n_left, n_discard);

-                    llama_kv_self_seq_rm (ctx, slot.id, n_keep            , n_keep + n_discard);
-                    llama_kv_self_seq_add(ctx, slot.id, n_keep + n_discard, slot.n_past, -n_discard);
+                    llama_memory_seq_rm (llama_get_memory(ctx), slot.id, n_keep            , n_keep + n_discard);
+                    llama_memory_seq_add(llama_get_memory(ctx), slot.id, n_keep + n_discard, slot.n_past, -n_discard);

                    // add generated tokens to cache
                    {
@@ -3189,8 +3189,8 @@ struct server_context {

                                     const int64_t kv_shift = (int64_t) head_p - (int64_t) head_c;

-                                    llama_kv_self_seq_rm (ctx, slot.id, head_p, head_c);
-                                    llama_kv_self_seq_add(ctx, slot.id, head_c, head_c + n_match, kv_shift);
+                                    llama_memory_seq_rm (llama_get_memory(ctx), slot.id, head_p, head_c);
+                                    llama_memory_seq_add(llama_get_memory(ctx), slot.id, head_c, head_c + n_match, kv_shift);

                                     for (size_t i = 0; i < n_match; i++) {
                                         slot.cache_tokens.set_token(head_p + i, slot.cache_tokens[head_c + i]);
@@ -3212,7 +3212,7 @@ struct server_context {
                     }

                     if (slot.n_past > 0 && slot.n_past < (int) slot.cache_tokens.size()) {
-                        const auto pos_min = llama_kv_self_seq_pos_min(ctx, slot.id);
+                        const auto pos_min = llama_memory_seq_pos_min(llama_get_memory(ctx), slot.id);
                         if (pos_min == -1) {
                             SLT_ERR(slot, "n_past = %d, cache_tokens.size() = %d, seq_id = %d, pos_min = %d\n", slot.n_past, (int) slot.cache_tokens.size(), slot.id, pos_min);
                             GGML_ABORT("pos_min == -1, but n_past > 0 - should not happen: https://github.com/ggml-org/llama.cpp/pull/13833#discussion_r2116181237");
@@ -3247,9 +3247,9 @@ struct server_context {
                 }

                 // keep only the common part
-                if (!llama_kv_self_seq_rm(ctx, slot.id, slot.n_past, -1)) {
+                if (!llama_memory_seq_rm(llama_get_memory(ctx), slot.id, slot.n_past, -1)) {
                     // could not partially delete (likely using a non-Transformer model)
-                    llama_kv_self_seq_rm(ctx, slot.id, -1, -1);
+                    llama_memory_seq_rm(llama_get_memory(ctx), slot.id, -1, -1);

                     // there is no common part left
                     slot.n_past = 0;
@@ -3589,7 +3589,7 @@ struct server_context {
                 slot.cache_tokens.push_back(id);
                 slot.cache_tokens.insert({ids.begin(), ids.end() - 1});

-                llama_kv_self_seq_rm(ctx, slot.id, slot.n_past, -1);
+                llama_memory_seq_rm(llama_get_memory(ctx), slot.id, slot.n_past, -1);

                 for (size_t i = 0; i < ids.size(); ++i) {
                     completion_token_output result;
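
The recurring change in every hunk above is replacing the implicit llama_kv_self_* calls with the explicit memory handle returned by llama_get_memory(ctx). Below is a minimal, hypothetical sketch (not part of the server code; the helper name and standalone framing are assumptions for illustration) that expresses the context-shift step from the -2985 hunk using only the functions that appear in this diff:

#include "llama.h"

// Illustrative helper: discard the oldest generated tokens of a sequence and
// slide the remaining cells back, mirroring the server's context shift.
static void shift_context(llama_context * ctx, llama_seq_id seq_id,
                          llama_pos n_keep, llama_pos n_past, llama_pos n_discard) {
    auto mem = llama_get_memory(ctx);

    // not all memory/cache types support position shifting
    // (the server checks this once at startup and disables ctx_shift)
    if (!llama_memory_can_shift(mem)) {
        return;
    }

    // remove cells covering positions [n_keep, n_keep + n_discard) ...
    llama_memory_seq_rm (mem, seq_id, n_keep,             n_keep + n_discard);
    // ... then shift the remaining positions [n_keep + n_discard, n_past) down by n_discard
    llama_memory_seq_add(mem, seq_id, n_keep + n_discard, n_past, -n_discard);
}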