Explorar o código

llama : add 18-layer model type for Gemma 3-270m (#15319)

This commit adds support for the 18-layer model type in the Gemma3
series, which is the size of the Gemma3-270m model.

The motivation for this commit is that this was the only change required
for Gemma3-270m to be converted to GGUF format and used with llama.cpp.

Once the model has been converted and uploaded to Huggingface it can be
used like this:
```console
$ ./build/bin/llama-cli -hf ggml-org/gemma-3-270m-GGUF:Q8_0
```
Daniel Bevenius hai 5 meses
pai
achega
7a0de96045
Modificáronse 2 ficheiros con 2 adicións e 0 borrados
  1. 1 0
      src/llama-model.cpp
  2. 1 0
      src/llama-model.h

+ 1 - 0
src/llama-model.cpp

@@ -1095,6 +1095,7 @@ void llama_model::load_hparams(llama_model_loader & ml) {
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
 
                 switch (hparams.n_layer) {
+                    case 18: type = LLM_TYPE_537M; break;
                     case 26: type = LLM_TYPE_1B; break;
                     case 34: type = LLM_TYPE_4B; break;
                     case 48: type = LLM_TYPE_12B; break;

+ 1 - 0
src/llama-model.h

@@ -39,6 +39,7 @@ enum llm_type {
     LLM_TYPE_410M,
     LLM_TYPE_450M,
     LLM_TYPE_475M,
+    LLM_TYPE_537M,
     LLM_TYPE_700M,
     LLM_TYPE_770M,
     LLM_TYPE_780M,