@@ -79,8 +79,8 @@ kernel void kernel_mul_mm_f16_f32_l4_lm(
 
     for (int block = 0; block < ne00; block += BK) {
         for (int l = 0; l < BM; l += loadstride_a) {
-            if (loadc_a + l < ne01) {
-                const int idx = pos_a + (loadc_a + l) * stride_a / LOAD_VEC_A + loadr_a;
+            if (ir*BM + loadc_a + l < ne01) {
+                const int idx = pos_a + (loadc_a + l) * stride_a / LOAD_VEC_A + loadr_a;
                 buf_a[(loadr_a * LOAD_VEC_A + 0) * BM + loadc_a + l] = src0[idx].s0;
                 buf_a[(loadr_a * LOAD_VEC_A + 1) * BM + loadc_a + l] = src0[idx].s1;
                 buf_a[(loadr_a * LOAD_VEC_A + 2) * BM + loadc_a + l] = src0[idx].s2;
@@ -94,7 +94,7 @@ kernel void kernel_mul_mm_f16_f32_l4_lm(
         }
 
         for (int l = 0; l < BN; l += loadstride_b) {
-            if (loadc_b + l < ne11) {
+            if (ic*BN + loadc_b + l < ne11) {
                 const int idx = pos_b + (loadc_b + l) * stride_b / LOAD_VEC_B + loadr_b;
                 buf_b[(loadr_b * LOAD_VEC_B + 0) * BN + loadc_b + l] = src1[idx].s0;
                 buf_b[(loadr_b * LOAD_VEC_B + 1) * BN + loadc_b + l] = src1[idx].s1;
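Both hunks make the same fix, once per operand: the old guards compared only the intra-tile offset (`loadc_a + l`, `loadc_b + l`) against the matrix extent (`ne01`, `ne11`), which is only correct for the first tile. Adding the workgroup's tile origin (`ir*BM` rows for A, `ic*BN` columns for B) masks loads that fall past the matrix edge in the last, partial tile. Below is a minimal host-side C sketch of the corrected row guard, assuming `ir` is the workgroup's tile index along M and `BM` the tile height, as the kernel's names suggest; it is illustrative, not the kernel code.

```c
// Sketch: when a workgroup loads tile (ir, ic) of an M x N matrix into
// a BM x BN scratch buffer, the row guard must include the tile origin
// ir*BM, not just the intra-tile offset. Names mirror the kernel but
// the function itself is a stand-in for the device-side check.
#include <stdio.h>

#define BM 64  /* assumed tile height */

/* Returns 1 if global row (ir*BM + loadc + l) lies inside the matrix.
   The pre-fix guard was (loadc + l < ne01): wrong for any tile with
   ir > 0, and it let the last tile read rows past ne01. */
static int row_in_bounds(int ir, int loadc, int l, int ne01) {
    return ir * BM + loadc + l < ne01;
}

int main(void) {
    /* A 100-row matrix split into 64-row tiles: tile 1 holds rows
       64..99, so intra-tile rows 36..63 must be masked out. */
    int ne01 = 100;
    printf("%d\n", row_in_bounds(1, 0, 35, ne01)); /* 1: row 99 is valid */
    printf("%d\n", row_in_bounds(1, 0, 36, ne01)); /* 0: row 100 is OOB  */
    return 0;
}
```

The column guard for B follows the same shape with `ic*BN + loadc_b + l < ne11`.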