@@ -481,14 +481,14 @@ ggml_fp16_t ggml_fp32_to_fp16(float x) {
     return GGML_FP32_TO_FP16(x);
 }
 
-void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, size_t n) {
-    for (size_t i = 0; i < n; i++) {
+void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n) {
+    for (int i = 0; i < n; i++) {
         y[i] = GGML_FP16_TO_FP32(x[i]);
     }
 }
 
-void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, size_t n) {
-    size_t i = 0;
+void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
+    int i = 0;
 #if defined(__F16C__)
     for (; i + 7 < n; i += 8) {
         __m256 x_vec = _mm256_loadu_ps(x + i);
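The only substantive change in the hunk above is the loop counter type (size_t to int), matching how the trait table below now calls these row converters. The second function is cut off at the hunk boundary; for orientation, here is a reconstructed sketch of its full shape. This sketch is not part of the diff: it assumes the F16C intrinsics from <immintrin.h> and ggml's GGML_FP32_TO_FP16 macro, and the real file may differ in detail.

// Sketch (reconstructed, not part of this diff): the full shape of the
// vectorized f32 -> f16 row converter truncated above.
void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n) {
    int i = 0;
#if defined(__F16C__)
    for (; i + 7 < n; i += 8) {
        // convert 8 floats at a time, rounding to nearest even
        __m256  x_vec = _mm256_loadu_ps(x + i);
        __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storeu_si128((__m128i *)(y + i), y_vec);
    }
    for (; i + 3 < n; i += 4) {
        // 4-wide step for a medium tail
        __m128  x_vec = _mm_loadu_ps(x + i);
        __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64((__m128i *)(y + i), y_vec);
    }
#endif
    for (; i < n; i++) {
        // scalar tail, and the whole loop when F16C is unavailable
        y[i] = GGML_FP32_TO_FP16(x[i]);
    }
}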
@@ -1627,109 +1627,112 @@ static void dequantize_row_q8_0(const void * restrict vx, float * restrict y, in
     }
 }
 
+static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y);
+static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y);
 static void ggml_vec_dot_q4_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
 static void ggml_vec_dot_q4_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
 static void ggml_vec_dot_q5_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
 static void ggml_vec_dot_q5_1_q8_1(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
 static void ggml_vec_dot_q8_0_q8_0(const int n, float * restrict s, const void * restrict vx, const void * restrict vy);
 
-static const quantize_fns_t quantize_fns[GGML_TYPE_COUNT] = {
+static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
+    [GGML_TYPE_F32] = {
+        .vec_dot                  = (ggml_vec_dot_t) ggml_vec_dot_f32,
+        .vec_dot_type             = GGML_TYPE_F32,
+    },
+    [GGML_TYPE_F16] = {
+        .to_float                 = (ggml_to_float_t) ggml_fp16_to_fp32_row,
+        .from_float               = (ggml_from_float_t) ggml_fp32_to_fp16_row,
+        .from_float_reference     = (ggml_from_float_t) ggml_fp32_to_fp16_row,
+        .vec_dot                  = (ggml_vec_dot_t) ggml_vec_dot_f16,
+        .vec_dot_type             = GGML_TYPE_F16,
+    },
     [GGML_TYPE_Q4_0] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q4_0,
-        .quantize_row_q           = quantize_row_q4_0,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_0_reference,
-        .quantize_row_q_dot       = quantize_row_q8_0,
-        .vec_dot_q                = ggml_vec_dot_q4_0_q8_0,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q4_0,
+        .from_float               = quantize_row_q4_0,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q4_0_reference,
+        .vec_dot                  = ggml_vec_dot_q4_0_q8_0,
         .vec_dot_type             = GGML_TYPE_Q8_0,
     },
     [GGML_TYPE_Q4_1] = {
-        .dequantize_row_q         = (dequantize_row_q_t)dequantize_row_q4_1,
-        .quantize_row_q           = quantize_row_q4_1,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_1_reference,
-        .quantize_row_q_dot       = quantize_row_q8_1,
-        .vec_dot_q                = ggml_vec_dot_q4_1_q8_1,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q4_1,
+        .from_float               = quantize_row_q4_1,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q4_1_reference,
+        .vec_dot                  = ggml_vec_dot_q4_1_q8_1,
         .vec_dot_type             = GGML_TYPE_Q8_1,
     },
     [GGML_TYPE_Q5_0] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q5_0,
-        .quantize_row_q           = quantize_row_q5_0,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_0_reference,
-        .quantize_row_q_dot       = quantize_row_q8_0,
-        .vec_dot_q                = ggml_vec_dot_q5_0_q8_0,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q5_0,
+        .from_float               = quantize_row_q5_0,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q5_0_reference,
+        .vec_dot                  = ggml_vec_dot_q5_0_q8_0,
         .vec_dot_type             = GGML_TYPE_Q8_0,
     },
     [GGML_TYPE_Q5_1] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q5_1,
-        .quantize_row_q           = quantize_row_q5_1,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_1_reference,
-        .quantize_row_q_dot       = quantize_row_q8_1,
-        .vec_dot_q                = ggml_vec_dot_q5_1_q8_1,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q5_1,
+        .from_float               = quantize_row_q5_1,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q5_1_reference,
+        .vec_dot                  = ggml_vec_dot_q5_1_q8_1,
         .vec_dot_type             = GGML_TYPE_Q8_1,
     },
     [GGML_TYPE_Q8_0] = {
-        .dequantize_row_q         = dequantize_row_q8_0,
-        .quantize_row_q           = quantize_row_q8_0,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q8_0_reference,
-        .quantize_row_q_dot       = quantize_row_q8_0,
-        .vec_dot_q                = ggml_vec_dot_q8_0_q8_0,
+        .to_float                 = dequantize_row_q8_0,
+        .from_float               = quantize_row_q8_0,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q8_0_reference,
+        .vec_dot                  = ggml_vec_dot_q8_0_q8_0,
         .vec_dot_type             = GGML_TYPE_Q8_0,
     },
     [GGML_TYPE_Q8_1] = {
-        .dequantize_row_q         = NULL, // TODO
-        .quantize_row_q           = quantize_row_q8_1,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q8_1_reference,
-        .quantize_row_q_dot       = quantize_row_q8_1,
-        .vec_dot_q                = NULL, // TODO
+        .from_float               = quantize_row_q8_1,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q8_1_reference,
         .vec_dot_type             = GGML_TYPE_Q8_1,
     },
 #ifdef GGML_USE_K_QUANTS
     [GGML_TYPE_Q2_K] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q2_K,
-        .quantize_row_q           = quantize_row_q2_K,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q2_K_reference,
-        .quantize_row_q_dot       = quantize_row_q8_K,
-        .vec_dot_q                = ggml_vec_dot_q2_K_q8_K,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q2_K,
+        .from_float               = quantize_row_q2_K,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q2_K_reference,
+        .vec_dot                  = ggml_vec_dot_q2_K_q8_K,
         .vec_dot_type             = GGML_TYPE_Q8_K,
     },
     [GGML_TYPE_Q3_K] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q3_K,
-        .quantize_row_q           = quantize_row_q3_K,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q3_K_reference,
-        .quantize_row_q_dot       = quantize_row_q8_K,
-        .vec_dot_q                = ggml_vec_dot_q3_K_q8_K,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q3_K,
+        .from_float               = quantize_row_q3_K,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q3_K_reference,
+        .vec_dot                  = ggml_vec_dot_q3_K_q8_K,
         .vec_dot_type             = GGML_TYPE_Q8_K,
     },
     [GGML_TYPE_Q4_K] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q4_K,
-        .quantize_row_q           = quantize_row_q4_K,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q4_K_reference,
-        .quantize_row_q_dot       = quantize_row_q8_K,
-        .vec_dot_q                = ggml_vec_dot_q4_K_q8_K,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q4_K,
+        .from_float               = quantize_row_q4_K,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q4_K_reference,
+        .vec_dot                  = ggml_vec_dot_q4_K_q8_K,
         .vec_dot_type             = GGML_TYPE_Q8_K,
     },
     [GGML_TYPE_Q5_K] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q5_K,
-        .quantize_row_q           = quantize_row_q5_K,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q5_K_reference,
-        .quantize_row_q_dot       = quantize_row_q8_K,
-        .vec_dot_q                = ggml_vec_dot_q5_K_q8_K,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q5_K,
+        .from_float               = quantize_row_q5_K,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q5_K_reference,
+        .vec_dot                  = ggml_vec_dot_q5_K_q8_K,
         .vec_dot_type             = GGML_TYPE_Q8_K,
     },
     [GGML_TYPE_Q6_K] = {
-        .dequantize_row_q         = (dequantize_row_q_t) dequantize_row_q6_K,
-        .quantize_row_q           = quantize_row_q6_K,
-        .quantize_row_q_reference = (quantize_row_q_t) quantize_row_q6_K_reference,
-        .quantize_row_q_dot       = quantize_row_q8_K,
-        .vec_dot_q                = ggml_vec_dot_q6_K_q8_K,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q6_K,
+        .from_float               = quantize_row_q6_K,
+        .from_float_reference     = (ggml_from_float_t) quantize_row_q6_K_reference,
+        .vec_dot                  = ggml_vec_dot_q6_K_q8_K,
         .vec_dot_type             = GGML_TYPE_Q8_K,
     },
+    [GGML_TYPE_Q8_K] = {
+        .from_float               = quantize_row_q8_K,
+    }
 #endif
 };
 
 // For internal test use
-quantize_fns_t ggml_internal_get_quantize_fn(size_t i) {
+ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type i) {
     GGML_ASSERT(i < GGML_TYPE_COUNT);
-    return quantize_fns[i];
+    return type_traits[i];
 }
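For reference, the trait machinery this table populates is declared in ggml.h in the same change. Reproduced here from memory of the accompanying header diff, so treat the exact spelling and alignment as approximate:

typedef void (*ggml_to_float_t)  (const void  * x, float * y, int k);
typedef void (*ggml_from_float_t)(const float * x, void  * y, int k);
typedef void (*ggml_vec_dot_t)   (const int n, float * s, const void * x, const void * y);

typedef struct {
    ggml_to_float_t   to_float;             // dequantize/convert a row to f32
    ggml_from_float_t from_float;           // quantize/convert an f32 row to this type
    ggml_from_float_t from_float_reference; // scalar reference implementation
    ggml_vec_dot_t    vec_dot;              // dot-product kernel for this type
    enum ggml_type    vec_dot_type;         // type the second operand must have
} ggml_type_traits_t;

The point of adding F32 and F16 entries is that the float types stop being special cases: every type now answers "how do I convert to/from f32" and "which kernel computes my dot product", and the generic mat-mul path below consumes only those answers.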
@@ -2275,7 +2278,7 @@ inline static void ggml_vec_neg_f32 (const int n, float * y, const float * x)
 inline static void ggml_vec_mul_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]*y[i]; }
 inline static void ggml_vec_div_f32 (const int n, float * z, const float * x, const float * y) { for (int i = 0; i < n; ++i) z[i] = x[i]/y[i]; }
 
-inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
+static void ggml_vec_dot_f32(const int n, float * restrict s, const float * restrict x, const float * restrict y) {
 #ifdef GGML_SIMD
     float sumf = 0.0f;
     const int np = (n & ~(GGML_F32_STEP - 1));
@@ -2312,7 +2315,7 @@ inline static void ggml_vec_dot_f32(const int n, float * restrict s, const float
     *s = sumf;
 }
 
-inline static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
+static void ggml_vec_dot_f16(const int n, float * restrict s, ggml_fp16_t * restrict x, ggml_fp16_t * restrict y) {
     ggml_float sumf = 0.0;
 
 #if defined(GGML_SIMD)
@@ -7825,8 +7828,8 @@ static void ggml_compute_forward_dup_f16(
                         id += ne00 * (ne01 - ir1);
                     }
                 }
-            } else if (ggml_is_quantized(dst->type)) {
-                quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q;
+            } else if (type_traits[dst->type].from_float) {
+                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
                 float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
 
                 size_t id = 0;
@@ -8078,26 +8081,8 @@ static void ggml_compute_forward_dup_f32(
                         id += rs * (ne01 - ir1);
                     }
                 }
-            } else if (dst->type == GGML_TYPE_F16) {
-                size_t id = 0;
-                ggml_fp16_t * dst_ptr = (ggml_fp16_t *) dst->data;
-
-                for (int i03 = 0; i03 < ne03; i03++) {
-                    for (int i02 = 0; i02 < ne02; i02++) {
-                        id += ne00 * ir0;
-                        for (int i01 = ir0; i01 < ir1; i01++) {
-                            for (int i00 = 0; i00 < ne00; i00++) {
-                                const float * src0_ptr = (float *) ((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03);
-
-                                dst_ptr[id] = GGML_FP32_TO_FP16(*src0_ptr);
-                                id++;
-                            }
-                        }
-                        id += ne00 * (ne01 - ir1);
-                    }
-                }
-            } else if (ggml_is_quantized(dst->type)) {
-                quantize_row_q_t const quantize_row_q = quantize_fns[dst->type].quantize_row_q;
+            } else if (type_traits[dst->type].from_float) {
+                ggml_from_float_t const quantize_row_q = type_traits[dst->type].from_float;
 
                 size_t id = 0;
                 size_t rs = nb0 * (ne00 / GGML_BLCK_SIZE[dst->type]);
@@ -8503,8 +8488,8 @@ static void ggml_compute_forward_add_q_f32(
     const int nth = params->nth;
 
     const enum ggml_type type = src0->type;
-    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
-    quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;
+    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
+    ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
 
     // we don't support permuted src0 or src1
     GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
@@ -8777,8 +8762,8 @@ static void ggml_compute_forward_add1_q_f32(
     GGML_TENSOR_UNARY_OP_LOCALS;
 
     const enum ggml_type type = src0->type;
-    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
-    quantize_row_q_t const quantize_row_q = quantize_fns[type].quantize_row_q;
+    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
+    ggml_from_float_t const quantize_row_q = type_traits[type].from_float;
 
     // we don't support permuted src0
     GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
@@ -10578,317 +10563,7 @@ static bool ggml_compute_forward_mul_mat_use_blas(
 }
 #endif
 
-static void ggml_compute_forward_mul_mat_f32(
-        const struct ggml_compute_params * params,
-        const struct ggml_tensor * src0,
-        const struct ggml_tensor * src1,
-        struct ggml_tensor * dst) {
-    int64_t t0 = ggml_perf_time_us();
-    UNUSED(t0);
-
-    GGML_TENSOR_BINARY_OP_LOCALS;
-
-    const int ith = params->ith;
-    const int nth = params->nth;
-
-    assert(ne02 == ne12);
-    assert(ne03 == ne13);
-    assert(ne2  == ne12);
-    assert(ne3  == ne13);
-
-    // we don't support permuted src0 or src1
-    assert(nb00 == sizeof(float));
-    assert(nb10 == sizeof(float));
-
-    // dst cannot be transposed or permuted
-    assert(nb0 == sizeof(float));
-    assert(nb0 <= nb1);
-    assert(nb1 <= nb2);
-    assert(nb2 <= nb3);
-
-    assert(ne0 == ne01);
-    assert(ne1 == ne11);
-    assert(ne2 == ne02);
-    assert(ne3 == ne03);
-
-    // nb01 >= nb00 - src0 is not transposed
-    //   compute by src0 rows
-
-#if defined(GGML_USE_CLBLAST)
-    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
-        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
-            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
-        }
-        return;
-    }
-#endif
-
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
-    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
-        if (params->ith != 0) {
-            return;
-        }
-
-        if (params->type == GGML_TASK_INIT) {
-            return;
-        }
-
-        if (params->type == GGML_TASK_FINALIZE) {
-            return;
-        }
-
-        for (int64_t i03 = 0; i03 < ne03; i03++) {
-            for (int64_t i02 = 0; i02 < ne02; i02++) {
-                const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
-                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
-                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
-
-                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
-                        ne11, ne01, ne10,
-                        1.0f,    y, ne10,
-                                 x, ne00,
-                        0.0f,    d, ne01);
-            }
-        }
-        //printf("CBLAS F32 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);
-
-        return;
-    }
-#endif
-
-    if (params->type == GGML_TASK_INIT) {
-        return;
-    }
-
-    if (params->type == GGML_TASK_FINALIZE) {
-        return;
-    }
-
-    // parallelize by src0 rows using ggml_vec_dot_f32
-
-    // total rows in src0
-    const int nr = ne01*ne02*ne03;
-
-    // rows per thread
-    const int dr = (nr + nth - 1)/nth;
-
-    // row range for this thread
-    const int ir0 = dr*ith;
-    const int ir1 = MIN(ir0 + dr, nr);
-
-    for (int ir = ir0; ir < ir1; ++ir) {
-        // src0 indices
-        const int i03 = ir/(ne02*ne01);
-        const int i02 = (ir - i03*ne02*ne01)/ne01;
-        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
-
-        for (int64_t ic = 0; ic < ne11; ++ic) {
-            // src1 indices
-            const int i13 = i03;
-            const int i12 = i02;
-            const int i11 = ic;
-
-            // dst indices
-            const int i0 = i01;
-            const int i1 = i11;
-            const int i2 = i02;
-            const int i3 = i03;
-
-            ggml_vec_dot_f32(ne00,
-                    (float *) ((char *)  dst->data + (i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3)),
-                    (float *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03)),
-                    (float *) ((char *) src1->data + (i11*nb11 + i12*nb12 + i13*nb13)));
-        }
-    }
-
-    //int64_t t1 = ggml_perf_time_us();
-    //static int64_t acc = 0;
-    //acc += t1 - t0;
-    //if (t1 - t0 > 10) {
-    //    printf("\n");
-    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
-    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
-    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
-    //    printf("nb10 = %5d, nb11 = %5d, nb12 = %5d, nb13 = %5d\n", nb10, nb11, nb12, nb13);
-
-    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
-    //}
-}
-
-static void ggml_compute_forward_mul_mat_f16_f32(
-        const struct ggml_compute_params * params,
-        const struct ggml_tensor * src0,
-        const struct ggml_tensor * src1,
-        struct ggml_tensor * dst) {
-    int64_t t0 = ggml_perf_time_us();
-    UNUSED(t0);
-
-    GGML_TENSOR_BINARY_OP_LOCALS;
-
-    //const int64_t ne   = ne0*ne1*ne2*ne3;
-
-    const int ith = params->ith;
-    const int nth = params->nth;
-
-    GGML_ASSERT(ne02 == ne12);
-    GGML_ASSERT(ne03 == ne13);
-    GGML_ASSERT(ne2  == ne12);
-    GGML_ASSERT(ne3  == ne13);
-
-    // TODO: we don't support permuted src0
-    GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
-
-    // dst cannot be transposed or permuted
-    GGML_ASSERT(nb0 == sizeof(float));
-    GGML_ASSERT(nb0 <= nb1);
-    GGML_ASSERT(nb1 <= nb2);
-    GGML_ASSERT(nb2 <= nb3);
-
-    GGML_ASSERT(ne0 == ne01);
-    GGML_ASSERT(ne1 == ne11);
-    GGML_ASSERT(ne2 == ne02);
-    GGML_ASSERT(ne3 == ne03);
-
-    // nb01 >= nb00 - src0 is not transposed
-    //   compute by src0 rows
-
-#if defined(GGML_USE_CLBLAST)
-    if (ggml_cl_can_mul_mat(src0, src1, dst)) {
-        if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
-            ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
-        }
-        return;
-    }
-#endif
-
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
-    if (ggml_compute_forward_mul_mat_use_blas(src0, src1, dst)) {
-        GGML_ASSERT(nb10 == sizeof(float));
-
-        if (params->ith != 0) {
-            return;
-        }
-
-        if (params->type == GGML_TASK_INIT) {
-            return;
-        }
-
-        if (params->type == GGML_TASK_FINALIZE) {
-            return;
-        }
-
-        for (int64_t i03 = 0; i03 < ne03; i03++) {
-            for (int64_t i02 = 0; i02 < ne02; i02++) {
-                float * const wdata = params->wdata;
-                {
-                    size_t id = 0;
-                    for (int64_t i01 = 0; i01 < ne01; ++i01) {
-                        for (int64_t i00 = 0; i00 < ne00; ++i00) {
-                            wdata[id++] = GGML_FP16_TO_FP32(*(ggml_fp16_t *) ((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00));
-                        }
-                    }
-
-                    assert(id*sizeof(float) <= params->wsize);
-                }
-
-                const float * x = wdata;
-                const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
-
-                float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
-
-                // zT = y * xT
-                cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
-                        ne11, ne01, ne10,
-                        1.0f,    y, ne10,
-                                 x, ne00,
-                        0.0f,    d, ne01);
-            }
-        }
-
-        /*printf("CBLAS F16 = %f ms, %d x %d x %d x %d\n", (ggml_perf_time_us() - t0)/1000.0, ne0, ne1, ne2, ne3);*/
-
-        return;
-    }
-#endif
-
-    if (params->type == GGML_TASK_INIT) {
-        ggml_fp16_t * const wdata = params->wdata;
-
-        size_t id = 0;
-        for (int64_t i13 = 0; i13 < ne13; ++i13) {
-            for (int64_t i12 = 0; i12 < ne12; ++i12) {
-                for (int64_t i11 = 0; i11 < ne11; ++i11) {
-                    for (int64_t i10 = 0; i10 < ne10; ++i10) {
-                        wdata[id++] = GGML_FP32_TO_FP16(*(float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + i10*nb10));
-                    }
-                }
-            }
-        }
-
-        GGML_ASSERT(id*sizeof(ggml_fp16_t) <= params->wsize);
-
-        return;
-    }
-
-    if (params->type == GGML_TASK_FINALIZE) {
-        return;
-    }
-
-    // fp16 -> half the size, so divide by 2
-    // TODO: do not support transposed src1
-    assert(nb10/2 == sizeof(ggml_fp16_t));
-
-    // parallelize by src0 rows using ggml_vec_dot_f16
-
-    // total rows in src0
-    const int nr = ne01*ne02*ne03;
-
-    // rows per thread
-    const int dr = (nr + nth - 1)/nth;
-
-    // row range for this thread
-    const int ir0 = dr*ith;
-    const int ir1 = MIN(ir0 + dr, nr);
-
-    ggml_fp16_t * wdata = params->wdata;
-
-    for (int ir = ir0; ir < ir1; ++ir) {
-        // src0 indices
-        const int i03 = ir/(ne02*ne01);
-        const int i02 = (ir - i03*ne02*ne01)/ne01;
-        const int i01 = (ir - i03*ne02*ne01 - i02*ne01);
-
-        const int i13 = i03;
-        const int i12 = i02;
-
-        const int i0 = i01;
-        const int i2 = i02;
-        const int i3 = i03;
-
-        ggml_fp16_t * src0_row = (ggml_fp16_t *) ((char *) src0->data + (i01*nb01 + i02*nb02 + i03*nb03));
-        ggml_fp16_t * src1_col = wdata + ( 0 + i12*ne11 + i13*ne12*ne11)*ne00;
-
-        float * dst_col = (float *) ((char *) dst->data + (i0*nb0 + 0*nb1 + i2*nb2 + i3*nb3));
-
-        for (int64_t ic = 0; ic < ne11; ++ic) {
-            ggml_vec_dot_f16(ne00, &dst_col[ic*ne0], src0_row, src1_col + ic*ne00);
-        }
-    }
-
-    //int64_t t1 = ggml_time_us();
-    //static int64_t acc = 0;
-    //acc += t1 - t0;
-    //if (t1 - t0 > 10) {
-    //    printf("\n");
-    //    printf("ne00 = %5d, ne01 = %5d, ne02 = %5d, ne03 = %5d\n", ne00, ne01, ne02, ne03);
-    //    printf("nb00 = %5d, nb01 = %5d, nb02 = %5d, nb03 = %5d\n", nb00, nb01, nb02, nb03);
-    //    printf("ne10 = %5d, ne11 = %5d, ne12 = %5d, ne13 = %5d\n", ne10, ne11, ne12, ne13);
-
-    //    printf("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX task %d/%d: %d us, acc = %d\n", ith, nth, (int) (t1 - t0), (int) acc);
-    //}
-}
-
-static void ggml_compute_forward_mul_mat_q_f32(
+static void ggml_compute_forward_mul_mat(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
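This hunk is the heart of the change: the three nearly identical mat-mul paths (F32, F16, quantized) collapse into a single ggml_compute_forward_mul_mat driven by the trait table. To make the control flow concrete, here is a stand-alone toy model of the trait-driven dispatch. It is hypothetical and heavily simplified, with made-up names, not ggml code:

#include <stdio.h>
#include <stddef.h>

// Minimal model of the type_traits dispatch (hypothetical names).
typedef void (*from_float_t)(const float * x, void * y, int k);
typedef void (*vec_dot_t)(int n, float * s, const void * x, const void * y);

typedef struct {
    from_float_t from_float;   // converts an f32 row into this type (NULL if unused)
    vec_dot_t    vec_dot;      // dot-product kernel for this type
    int          vec_dot_type; // the type src1 rows must be converted to
} traits_t;

enum { TYPE_F32, TYPE_COUNT };

static void vec_dot_f32(int n, float * s, const void * vx, const void * vy) {
    const float * x = vx;
    const float * y = vy;
    float sum = 0.0f;
    for (int i = 0; i < n; i++) {
        sum += x[i]*y[i];
    }
    *s = sum;
}

static const traits_t traits[TYPE_COUNT] = {
    [TYPE_F32] = { .from_float = NULL, .vec_dot = vec_dot_f32, .vec_dot_type = TYPE_F32 },
};

int main(void) {
    float a[4] = {1, 2, 3, 4};
    float b[4] = {4, 3, 2, 1};
    float s = 0.0f;
    // src1 already has vec_dot_type here, so no conversion pass is needed
    traits[TYPE_F32].vec_dot(4, &s, a, b);
    printf("dot = %f\n", s); // prints 20.000000
    return 0;
}

In the real code the same idea plays out across the hunks that follow: GGML_TASK_INIT converts src1 rows to vec_dot_type when they differ, and GGML_TASK_COMPUTE calls the table's vec_dot kernel per row.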
@@ -10907,9 +10582,10 @@ static void ggml_compute_forward_mul_mat_q_f32(
     GGML_ASSERT(ne3 == ne13);
 
     const enum ggml_type type = src0->type;
-    quantize_row_q_t const quantize_row_q_dot = quantize_fns[type].quantize_row_q_dot;
-    vec_dot_q_t      const vec_dot_q          = quantize_fns[type].vec_dot_q;
-    enum ggml_type   const vec_dot_type       = quantize_fns[type].vec_dot_type;
+
+    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
+    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
+    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
 
     // we don't support permuted src0 or src1
     GGML_ASSERT(nb00 == GGML_TYPE_SIZE[type]);
@@ -10952,27 +10628,27 @@ static void ggml_compute_forward_mul_mat_q_f32(
             return;
         }
 
-        float * const wdata = params->wdata;
-        dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
-
         for (int64_t i03 = 0; i03 < ne03; i03++) {
             for (int64_t i02 = 0; i02 < ne02; i02++) {
+                const void * x = (char *) src0->data + i03*nb03 + i02*nb02;
                 const float * y = (float *) ((char *) src1->data + i02*nb12 + i03*nb13);
 
                 float * d = (float *) ((char *) dst->data + i02*nb2 + i03*nb3);
 
-                {
+                if (type != GGML_TYPE_F32) {
+                    float * const wdata = params->wdata;
+                    ggml_to_float_t const to_float = type_traits[type].to_float;
+
                     size_t id = 0;
                     for (int64_t i01 = 0; i01 < ne01; ++i01) {
-                        dequantize_row_q((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
+                        to_float((char *) src0->data + i03*nb03 + i02*nb02 + i01*nb01, wdata + id, ne00);
                         id += ne00;
                     }
 
                     assert(id*sizeof(float) <= params->wsize);
+                    x = wdata;
                 }
 
-                const float * x = wdata;
-
                 cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
                         ne11, ne01, ne10,
                         1.0f,    y, ne10,
@@ -10988,14 +10664,16 @@ static void ggml_compute_forward_mul_mat_q_f32(
 #endif
 
     if (params->type == GGML_TASK_INIT) {
-        char * wdata = params->wdata;
-        const size_t row_size = ne10*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];
-
-        for (int64_t i13 = 0; i13 < ne13; ++i13) {
-            for (int64_t i12 = 0; i12 < ne12; ++i12) {
-                for (int64_t i11 = 0; i11 < ne11; ++i11) {
-                    quantize_row_q_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
-                    wdata += row_size;
+        if (src1->type != vec_dot_type) {
+            char * wdata = params->wdata;
+            const size_t row_size = ne10*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];
+
+            for (int64_t i13 = 0; i13 < ne13; ++i13) {
+                for (int64_t i12 = 0; i12 < ne12; ++i12) {
+                    for (int64_t i11 = 0; i11 < ne11; ++i11) {
+                        from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11), (void *) wdata, ne10);
+                        wdata += row_size;
+                    }
                 }
             }
         }
@@ -11019,7 +10697,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
     const int ir0 = dr*ith;
     const int ir1 = MIN(ir0 + dr, nr);
 
-    void * wdata = params->wdata;
+    void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
     const size_t row_size = ne00*GGML_TYPE_SIZE[vec_dot_type]/GGML_BLCK_SIZE[vec_dot_type];
 
     for (int ir = ir0; ir < ir1; ++ir) {
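The one-line change above is what lets the float paths fall out of the generic code: when src1 already has vec_dot_type, the INIT conversion pass was skipped and the kernel reads src1->data in place. A comment-style illustration of the two cases, with hypothetical shapes for clarity:

// Illustration (not ggml code):
//   src0 = Q4_0, src1 = F32 : vec_dot_type = Q8_0, so src1 rows were quantized
//                             into params->wdata during GGML_TASK_INIT
//   src0 = F32,  src1 = F32 : vec_dot_type = F32, so wdata aliases src1->data,
//                             INIT does nothing, and no extra copy is made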
@@ -11043,7 +10721,7 @@ static void ggml_compute_forward_mul_mat_q_f32(
         assert(ne00 % 32 == 0);
 
         for (int64_t ic = 0; ic < ne11; ++ic) {
-            vec_dot_q(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
+            vec_dot(ne00, &dst_col[ic*ne0], src0_row, (void *) (src1_col + ic*row_size));
         }
     }
 
@@ -11060,40 +10738,6 @@ static void ggml_compute_forward_mul_mat_q_f32(
     //}
 }
 
-static void ggml_compute_forward_mul_mat(
-        const struct ggml_compute_params * params,
-        const struct ggml_tensor * src0,
-        const struct ggml_tensor * src1,
-        struct ggml_tensor * dst) {
-    switch (src0->type) {
-        case GGML_TYPE_Q4_0:
-        case GGML_TYPE_Q4_1:
-        case GGML_TYPE_Q5_0:
-        case GGML_TYPE_Q5_1:
-        case GGML_TYPE_Q8_0:
-        case GGML_TYPE_Q8_1:
-        case GGML_TYPE_Q2_K:
-        case GGML_TYPE_Q3_K:
-        case GGML_TYPE_Q4_K:
-        case GGML_TYPE_Q5_K:
-        case GGML_TYPE_Q6_K:
-            {
-                ggml_compute_forward_mul_mat_q_f32(params, src0, src1, dst);
-            } break;
-        case GGML_TYPE_F16:
-            {
-                ggml_compute_forward_mul_mat_f16_f32(params, src0, src1, dst);
-            } break;
-        case GGML_TYPE_F32:
-            {
-                ggml_compute_forward_mul_mat_f32(params, src0, src1, dst);
-            } break;
-        default:
-            {
-                GGML_ASSERT(false);
-            } break;
-    }
-}
-
 
 // ggml_compute_forward_out_prod
 
@@ -11483,7 +11127,7 @@ static void ggml_compute_forward_get_rows_q(
     const int nc = src0->ne[0];
     const int nr = ggml_nelements(src1);
     const enum ggml_type type = src0->type;
-    dequantize_row_q_t const dequantize_row_q = quantize_fns[type].dequantize_row_q;
+    ggml_to_float_t const dequantize_row_q = type_traits[type].to_float;
 
     assert( dst->ne[0] == nc);
     assert( dst->ne[1] == nr);
@@ -16529,6 +16173,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
                         //printf("nr0 = %8d, nr1 = %8d, nr0*nr1 = %8d, n_tasks = %d\n", nr0, nr1, nr0*nr1, node->n_tasks);
 
                         size_t cur = 0;
+                        const enum ggml_type vec_dot_type = type_traits[node->src0->type].vec_dot_type;
 
 #if defined(GGML_USE_CUBLAS)
                         if (ggml_cuda_can_mul_mat(node->src0, node->src1, node)) {
@@ -16544,37 +16189,18 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
                         }
                         else
 #endif
-                        if (node->src0->type == GGML_TYPE_F16 && node->src1->type == GGML_TYPE_F32) {
 #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
-                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
-                                node->n_tasks = 1; // TODO: this actually is doing nothing
-                                                   //       the threads are still spinning
+                        if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
+                            node->n_tasks = 1; // TODO: this actually is doing nothing
+                                               //       the threads are still spinning
+                            if (node->src0->type != GGML_TYPE_F32) {
                                 // here we need memory just for single 2D matrix from src0
                                 cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
-                            } else {
-                                cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
-                            }
-#else
-                            cur = GGML_TYPE_SIZE[GGML_TYPE_F16]*ggml_nelements(node->src1);
-#endif
-                        } else if (node->src0->type == GGML_TYPE_F32 && node->src1->type == GGML_TYPE_F32) {
-                            cur = 0;
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
-                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
-                                node->n_tasks = 1;
                             }
+                        } else
 #endif
-                        } else if (ggml_is_quantized(node->src0->type) && node->src1->type == GGML_TYPE_F32) {
-#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS)
-                            if (ggml_compute_forward_mul_mat_use_blas(node->src0, node->src1, node)) {
-                                node->n_tasks = 1;
-                                cur = GGML_TYPE_SIZE[GGML_TYPE_F32]*(node->src0->ne[0]*node->src0->ne[1]);
-                            } else
-#endif
-                            {
-                                const enum ggml_type type_q = quantize_fns[node->src0->type].vec_dot_type;
-                                cur = GGML_TYPE_SIZE[type_q]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[type_q];
-                            }
+                        if (node->src1->type != vec_dot_type) {
+                            cur = GGML_TYPE_SIZE[vec_dot_type]*ggml_nelements(node->src1)/GGML_BLCK_SIZE[vec_dot_type];
                         } else {
                             GGML_ASSERT(false);
                         }
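With the old per-type branches gone, the scratch buffer for the non-BLAS path is sized uniformly from vec_dot_type. A worked example of the formula, assuming the standard Q8_0 block layout of one fp16 scale plus 32 int8 quants (34 bytes per 32 elements):

// cur = GGML_TYPE_SIZE[vec_dot_type] * ggml_nelements(src1) / GGML_BLCK_SIZE[vec_dot_type]
//
// e.g. src0 is Q4_0, so vec_dot_type is Q8_0:
//   GGML_TYPE_SIZE[Q8_0] = sizeof(block_q8_0) = 2 + 32 = 34 bytes
//   GGML_BLCK_SIZE[Q8_0] = 32 elements
//   src1 is a 4096 x 512 f32 matrix -> 2097152 elements
//   cur = 34 * 2097152 / 32 = 2228224 bytes (about 2.1 MiB)

When src1 already has vec_dot_type (for example F32 weights with an F32 activation), the new `if (node->src1->type != vec_dot_type)` check skips the allocation entirely instead of falling into one of the removed special cases.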