Browse code

Don't crash on ftype (formerly f16) == 4 (#917)

Stephan Walter 2 years ago
parent
commit
e7f6997f89
2 changed files with 4 additions and 1 deletion
  1. 3 1
      llama.cpp
  2. 1 0
      llama.h

+ 3 - 1
llama.cpp

@@ -827,7 +827,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_F16:  return "mostly F16";
         case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
         case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
-        default: LLAMA_ASSERT(false);
+        case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
+                                      return "mostly Q4_1, some F16";
+        default:                      return "unknown, may not work";
     }
 }
 
 

+ 1 - 0
llama.h

@@ -71,6 +71,7 @@ extern "C" {
         LLAMA_FTYPE_MOSTLY_F16  = 1,  // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_0 = 2,  // except 1d tensors
         LLAMA_FTYPE_MOSTLY_Q4_1 = 3,  // except 1d tensors
+        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
     };
 
 
     LLAMA_API struct llama_context_params llama_context_default_params();