@@ -2436,7 +2436,10 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             auto & attr = id_to_token[t.second].attr;
 
             if (t.first == "<|channel|>" || t.first == "<|message|>" || t.first == "<|start|>" || t.first == "<|constrain|>") {
-                attr = (llama_token_attr) (attr | LLAMA_TOKEN_ATTR_USER_DEFINED);
+                LLAMA_LOG_WARN("%s: setting token '%s' (%d) attribute to USER_DEFINED (%u), old attributes: %u\n",
+                        __func__, t.first.c_str(), t.second, LLAMA_TOKEN_ATTR_USER_DEFINED, attr);
+
+                attr = LLAMA_TOKEN_ATTR_USER_DEFINED;
             }
         }
 
@@ -2489,7 +2492,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_eog_ids.erase(end_id);
 
             auto & attr = id_to_token[end_id].attr;
-            attr = (llama_token_attr) (attr | LLAMA_TOKEN_ATTR_USER_DEFINED);
+            attr = LLAMA_TOKEN_ATTR_USER_DEFINED;
 
             LLAMA_LOG_WARN("%s: special_eog_ids contains both '<|return|>' and '<|call|>', or '<|calls|>' and '<|flush|>' tokens, removing '<|end|>' token from EOG list\n", __func__);
        }
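
The behavioral difference in both hunks is the same: the old code OR-ed USER_DEFINED into the token's existing attribute mask, while the new code overwrites the mask so USER_DEFINED is the only flag left. The following is a minimal standalone sketch of that difference, not llama.cpp code; the enum values are placeholders that merely mimic the bit-flag style of llama_token_attr and are assumptions, not the library's definitions.

// Standalone sketch: OR-ing a flag keeps previously set bits (e.g. CONTROL),
// plain assignment replaces the whole mask. Placeholder values, not llama.h's.
#include <cstdio>

enum token_attr : unsigned {
    ATTR_UNDEFINED    = 0,
    ATTR_CONTROL      = 1u << 3,  // hypothetical "control token" bit
    ATTR_USER_DEFINED = 1u << 4,  // hypothetical "user defined" bit
};

int main() {
    unsigned attr = ATTR_CONTROL;                 // token previously flagged as CONTROL

    unsigned or_ed    = attr | ATTR_USER_DEFINED; // old behavior: CONTROL | USER_DEFINED
    unsigned assigned = ATTR_USER_DEFINED;        // new behavior: USER_DEFINED only

    std::printf("or_ed = %u, assigned = %u\n", or_ed, assigned); // prints 24 vs 16 here
    return 0;
}

Under that assumption, the assignment form drops whatever flags the token carried before, rather than accumulating USER_DEFINED on top of them, which is why the first hunk also logs the old attribute value before overwriting it.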