@@ -367,6 +367,7 @@ struct ggml_backend_opencl_context {
     cl_program program_mul_mv_q4_0_f32_1d_8x_flat;
     cl_program program_mul_mv_q4_0_f32_1d_16x_flat;
     cl_program program_mul_mv_q6_K;
+    cl_program program_mul_mv_q8_0_f32, program_mul_mv_q8_0_f32_flat;
     cl_program program_mul_mv_mxfp4_f32;
     cl_program program_mul_mv_mxfp4_f32_flat;
     cl_program program_mul_mv_f16_f16;
@@ -402,6 +403,7 @@ struct ggml_backend_opencl_context {
     cl_program program_conv_2d_f16_f32;
     cl_program program_tsembd;
     cl_program program_mul_mv_id_q4_0_f32_8x_flat;
+    cl_program program_mul_mv_id_q8_0_f32, program_mul_mv_id_q8_0_f32_flat;
     cl_program program_mul_mv_id_mxfp4_f32;
     cl_program program_mul_mv_id_mxfp4_f32_flat;
     cl_program program_mul_mm_f32_f32_l4_lm;
@@ -450,11 +452,13 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v;
     cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0;
     cl_kernel kernel_convert_block_mxfp4, kernel_restore_block_mxfp4;
+    cl_kernel kernel_convert_block_q8_0, kernel_restore_block_q8_0;
     cl_kernel kernel_mul_mat_q4_0_f32_8x_flat;
     cl_kernel kernel_convert_block_q4_0_noshuffle;
     cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat;
     cl_kernel kernel_mul_mv_q6_K_f32;
     cl_kernel kernel_mul_mv_mxfp4_f32, kernel_mul_mv_mxfp4_f32_flat;
+    cl_kernel kernel_mul_mv_q8_0_f32, kernel_mul_mv_q8_0_f32_flat;
     cl_kernel kernel_im2col_f32, kernel_im2col_f16;
     cl_kernel kernel_argsort_f32_i32;
     cl_kernel kernel_sum_rows_f32;
@@ -471,6 +475,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_conv_2d_f16_f32;
     cl_kernel kernel_timestep_embedding;
     cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat;
+    cl_kernel kernel_mul_mv_id_q8_0_f32, kernel_mul_mv_id_q8_0_f32_flat;
     cl_kernel kernel_mul_mv_id_mxfp4_f32;
     cl_kernel kernel_mul_mv_id_mxfp4_f32_flat;
     cl_kernel kernel_mul_mm_f32_f32_l4_lm;
@@ -769,8 +774,10 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         CL_CHECK((backend_ctx->kernel_convert_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0_noshuffle", &err), err));
         CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0", &err), err));
         CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0", &err), err));
-        CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err));
-        CL_CHECK((backend_ctx->kernel_restore_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4", &err), err));
+        CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err));
+        CL_CHECK((backend_ctx->kernel_restore_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4", &err), err));
+        CL_CHECK((backend_ctx->kernel_convert_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q8_0", &err), err));
+        CL_CHECK((backend_ctx->kernel_restore_block_q8_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q8_0", &err), err));
         GGML_LOG_CONT(".");
     }
 
@@ -992,6 +999,38 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         GGML_LOG_CONT(".");
     }
 
+    // mul_mv_q8_0_f32
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "mul_mv_q8_0_f32.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("mul_mv_q8_0_f32.cl");
+#endif
+        backend_ctx->program_mul_mv_q8_0_f32 =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_mul_mv_q8_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_q8_0_f32, "kernel_mul_mv_q8_0_f32", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
+    // mul_mv_q8_0_f32_flat
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "mul_mv_q8_0_f32_flat.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("mul_mv_q8_0_f32_flat.cl");
+#endif
+        backend_ctx->program_mul_mv_q8_0_f32_flat =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_mul_mv_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_q8_0_f32_flat, "kernel_mul_mv_q8_0_f32_flat", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
     // mul_mv_mxfp4_f32
     {
 #ifdef GGML_OPENCL_EMBED_KERNELS
@@ -1733,6 +1772,38 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         GGML_LOG_CONT(".");
     }
 
+    // mul_mv_id_q8_0_f32
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "mul_mv_id_q8_0_f32.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("mul_mv_id_q8_0_f32.cl");
+#endif
+        backend_ctx->program_mul_mv_id_q8_0_f32 =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32 = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32, "kernel_mul_mv_id_q8_0_f32", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
+    // mul_mv_id_q8_0_f32_flat
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "mul_mv_id_q8_0_f32_flat.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("mul_mv_id_q8_0_f32_flat.cl");
+#endif
+        backend_ctx->program_mul_mv_id_q8_0_f32_flat =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_mul_mv_id_q8_0_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_id_q8_0_f32_flat, "kernel_mul_mv_id_q8_0_f32_flat", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
     // mul_mv_id_mxfp4_f32
     {
 #ifdef GGML_OPENCL_EMBED_KERNELS
@@ -2463,10 +2534,8 @@ struct ggml_tensor_extra_cl_mxfp4 {
             CL_CHECK(clReleaseMemObject(q_img));
             q = nullptr;
         }
-        // Currently, q_img and d_img are only initialized when SMALL_ALLOC is
-        // enabled. They point to the images in ggml_backend_opencl_buffer_context.
-        // So, there is no need to release them here.
-        // TODO: initialize them for non SMALL_PATH path, or remove them.
+        // Currently, q_img and d_img are not used. They can be image1d_buffer_t
+        // that wraps around q and d to utilize image access path.
         q_img = nullptr;
         e_img = nullptr;
         size_q = 0;
@@ -2474,6 +2543,41 @@ struct ggml_tensor_extra_cl_mxfp4 {
     }
 };
 
+struct ggml_tensor_extra_cl_q8_0 {
+    cl_mem q = nullptr;
+    cl_mem q_img = nullptr;
+
+    cl_mem d = nullptr;
+    cl_mem d_img = nullptr;
+
+    size_t size_q = 0;
+    size_t size_d = 0;
+
+    ~ggml_tensor_extra_cl_q8_0() {
+        reset();
+    }
+
+    void reset() {
+        // q and d are subbuffers into the bigger buffer allocated in ggml_backend_buffer.
+        // They must be properly released so that the original buffer can be
+        // properly released to avoid memory leak.
+        if (q != nullptr) {
+            CL_CHECK(clReleaseMemObject(q));
+            q = nullptr;
+        }
+        if (d != nullptr) {
+            CL_CHECK(clReleaseMemObject(d));
+            d = nullptr;
+        }
+        // Currently, q_img and d_img are not used. They can be image1d_buffer_t
+        // that wraps around q and d to utilize image access path.
+        q_img = nullptr;
+        d_img = nullptr;
+        size_q = 0;
+        size_d = 0;
+    }
+};
+
 //------------------------------------------------------------------------------
 // Backend API
 //------------------------------------------------------------------------------
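For context on the new "flat" (SOA) path: each Q8_0 block is one fp16 scale plus 32 int8 quants, and the extra above keeps the two streams in separate cl_mem buffers (q and d). The sketch below is an editor-added, host-side C++ illustration of the split that kernel_convert_block_q8_0 performs on the device; it is not part of the patch, and block_q8_0 here mirrors ggml's layout.

// Illustrative only, not part of the diff: AoS -> SoA split behind the flat Q8_0 layout.
#include <cstdint>
#include <cstddef>

typedef uint16_t ggml_half;   // fp16 stored as raw bits
constexpr int QK8_0 = 32;

struct block_q8_0 {
    ggml_half d;              // per-block scale
    int8_t    qs[QK8_0];      // quantized values
};

// Split interleaved blocks into the two contiguous streams held by
// ggml_tensor_extra_cl_q8_0::q (quants) and ::d (scales).
static void convert_block_q8_0_host(const block_q8_0 * src, int8_t * q, ggml_half * d, size_t n_blocks) {
    for (size_t ib = 0; ib < n_blocks; ++ib) {
        d[ib] = src[ib].d;
        for (int j = 0; j < QK8_0; ++j) {
            q[ib*QK8_0 + j] = src[ib].qs[j];
        }
    }
}
// kernel_restore_block_q8_0 is the inverse: reassemble block_q8_0 from q and d.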
@@ -2807,10 +2911,13 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
             } else if (op->src[0]->type == GGML_TYPE_Q4_0 || op->src[0]->type == GGML_TYPE_MXFP4 ||
                        op->src[0]->type == GGML_TYPE_Q6_K) {
                 return op->src[1]->type == GGML_TYPE_F32 && ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]);
+            } else if (op->src[0]->type == GGML_TYPE_Q8_0) {
+                return op->src[1]->type == GGML_TYPE_F32;
             }
             return false;
         case GGML_OP_MUL_MAT_ID:
             if (op->src[0]->type == GGML_TYPE_Q4_0 ||
+                op->src[0]->type == GGML_TYPE_Q8_0 ||
                 op->src[0]->type == GGML_TYPE_MXFP4) {
                 if (op->src[1]->type == GGML_TYPE_F32) {
                     return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]);
@@ -2983,6 +3090,12 @@ struct ggml_backend_opencl_buffer_context {
         for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) {
             delete e;
         }
+        for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0) {
+            delete e;
+        }
+        for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0_in_use) {
+            delete e;
+        }
     }
 
     ggml_tensor_extra_cl * ggml_opencl_alloc_temp_tensor_extra() {
@@ -3030,6 +3143,21 @@ struct ggml_backend_opencl_buffer_context {
         return extra;
     }
 
+    ggml_tensor_extra_cl_q8_0 * ggml_opencl_alloc_temp_tensor_extra_q8_0() {
+        ggml_tensor_extra_cl_q8_0 * extra;
+        if (temp_tensor_extras_q8_0.empty()) {
+            extra = new ggml_tensor_extra_cl_q8_0();
+        } else {
+            extra = temp_tensor_extras_q8_0.back();
+            temp_tensor_extras_q8_0.pop_back();
+        }
+
+        temp_tensor_extras_q8_0_in_use.push_back(extra);
+
+        extra->reset();
+        return extra;
+    }
+
     void reset() {
         for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) {
             temp_tensor_extras.push_back(e);
@@ -3045,6 +3173,11 @@ struct ggml_backend_opencl_buffer_context {
             temp_tensor_extras_mxfp4.push_back(e);
         }
         temp_tensor_extras_mxfp4_in_use.clear();
+
+        for (ggml_tensor_extra_cl_q8_0 * e : temp_tensor_extras_q8_0_in_use) {
+            temp_tensor_extras_q8_0.push_back(e);
+        }
+        temp_tensor_extras_q8_0_in_use.clear();
     }
 
     // Pools for extras. Available extras are in `temp_tensor_extras`. Extras
@@ -3058,6 +3191,8 @@ struct ggml_backend_opencl_buffer_context {
     std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0_in_use;
     std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4;
     std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4_in_use;
+    std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0;
+    std::vector<ggml_tensor_extra_cl_q8_0 *> temp_tensor_extras_q8_0_in_use;
 
     // The buffer_context is initially created by ggml_backend_buft_alloc_buffer
     // before any tensor is initialized (at the beginning of alloc_tensor_range).
@@ -3470,6 +3605,65 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer,
 
         tensor->extra = extra;
 
+        return;
+    }
+    if (tensor->type == GGML_TYPE_Q8_0) {
+        ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
+        GGML_ASSERT(extra_orig && "Tensors in OpenCL backend should have been allocated and initialized");
+
+        // Allocate the new extra and create aliases from the original.
+        ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+        ggml_tensor_extra_cl_q8_0 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_q8_0();
+
+        size_t size_d = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(ggml_fp16_t);
+        size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*(ggml_blck_size(tensor->type)*sizeof(char));
+        GGML_ASSERT(size_d + size_q == ggml_nbytes(tensor) && "Incorrect tensor size");
+
+        cl_int err;
+        cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
+            ggml_nbytes(tensor), NULL, &err);
+        CL_CHECK(err);
+        CL_CHECK(clEnqueueWriteBuffer(
+            queue, data_device, CL_TRUE, 0,
+            ggml_nbytes(tensor), data, 0, NULL, NULL));
+
+        // The original tensor memory is divided into scales and quants, i.e.,
+        // we first store scales, then quants.
+        cl_buffer_region region;
+
+        // Create subbuffer for scales.
+        region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment);
+        region.size = size_d;
+        extra->d = clCreateSubBuffer(
+            extra_orig->data_device, CL_MEM_READ_WRITE,
+            CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+        auto previous_origin = region.origin;
+
+        // Create subbuffer for quants.
+        region.origin = align_to(previous_origin + size_d, backend_ctx->alignment);
+        region.size = size_q;
+        extra->q = clCreateSubBuffer(
+            extra_orig->data_device, CL_MEM_READ_WRITE,
+            CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+
+        cl_kernel kernel = backend_ctx->kernel_convert_block_q8_0;
+
+        CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
+        CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q));
+        CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->d));
+
+        size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+        size_t local_work_size[] = {64, 1, 1};
+
+        cl_event evt;
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+        CL_CHECK(clWaitForEvents(1, &evt));
+        CL_CHECK(clReleaseMemObject(data_device));
+
+        tensor->extra = extra;
+
         return;
     }
 #endif // GGML_OPENCL_SOA_Q
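A note on the sub-buffer layout used above: the tensor's bytes inside the parent backend buffer are re-packed so that the fp16 scales come first and the int8 quants follow, with each region origin rounded up to the device alignment via the backend's existing align_to helper. The following is an editor-added sketch of that offset arithmetic only; it is not part of the patch, and the align_to shown here is an assumed round-up-to-multiple reimplementation matching how the hunk above uses it.

#include <cstddef>

// Assumed behaviour of the backend's align_to(): round `offset` up to the
// next multiple of `alignment` (the device's base-address alignment).
static size_t align_to(size_t offset, size_t alignment) {
    return ((offset + alignment - 1) / alignment) * alignment;
}

// Where the two sub-buffers of a Q8_0 tensor start inside the parent cl_mem:
// scales (one fp16 per 32-element block) first, quants (int8) second.
struct q8_0_layout {
    size_t d_origin;
    size_t q_origin;
};

static q8_0_layout q8_0_subbuffer_origins(size_t base_offset, size_t n_blocks, size_t alignment) {
    const size_t size_d = n_blocks * sizeof(unsigned short); // sizeof(ggml_fp16_t)
    q8_0_layout l;
    l.d_origin = align_to(base_offset, alignment);
    l.q_origin = align_to(l.d_origin + size_d, alignment);
    return l;
}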
|
@@ -3543,6 +3737,32 @@ static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer,
|
|
|
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
|
|
size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
|
|
|
size_t local_work_size[] = {1, 1, 1};
|
|
size_t local_work_size[] = {1, 1, 1};
|
|
|
|
|
|
|
|
|
|
+ cl_event evt;
|
|
|
|
|
+ CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
|
|
|
|
|
+ global_work_size, local_work_size, 0, NULL, &evt));
|
|
|
|
|
+ CL_CHECK(clWaitForEvents(1, &evt));
|
|
|
|
|
+ CL_CHECK(clEnqueueReadBuffer(
|
|
|
|
|
+ queue, data_device, CL_TRUE, offset,
|
|
|
|
|
+ size, data, 0, NULL, NULL));
|
|
|
|
|
+ CL_CHECK(clReleaseMemObject(data_device));
|
|
|
|
|
+ return;
|
|
|
|
|
+ }
|
|
|
|
|
+ if (tensor->type == GGML_TYPE_Q8_0) {
|
|
|
|
|
+ ggml_tensor_extra_cl_q8_0 * extra = (ggml_tensor_extra_cl_q8_0 *)tensor->extra;
|
|
|
|
|
+
|
|
|
|
|
+ cl_int err;
|
|
|
|
|
+ cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
|
|
|
|
|
+ ggml_nbytes(tensor), NULL, &err);
|
|
|
|
|
+ CL_CHECK(err);
|
|
|
|
|
+
|
|
|
|
|
+ cl_kernel kernel = backend_ctx->kernel_restore_block_q8_0;
|
|
|
|
|
+ CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q));
|
|
|
|
|
+ CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->d));
|
|
|
|
|
+ CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device));
|
|
|
|
|
+
|
|
|
|
|
+ size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
|
|
|
|
|
+ size_t local_work_size[] = {1, 1, 1};
|
|
|
|
|
+
|
|
|
cl_event evt;
|
|
cl_event evt;
|
|
|
CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
|
|
CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
|
|
|
global_work_size, local_work_size, 0, NULL, &evt));
|
|
global_work_size, local_work_size, 0, NULL, &evt));
|
|
@@ -6268,6 +6488,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
 #ifdef GGML_OPENCL_SOA_Q
     ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
     ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
+    ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra;
 #endif
 
     const int ne00 = src0 ? src0->ne[0] : 0;
@@ -6937,7 +7158,84 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
 #endif // GGML_OPENCL_SOA_Q
             break;
         case GGML_TYPE_Q4_1:
-        case GGML_TYPE_Q8_0:
+        case GGML_TYPE_Q8_0: {
+#ifdef GGML_OPENCL_SOA_Q
+            kernel = backend_ctx->kernel_mul_mv_q8_0_f32_flat;
+
+            // nth0 - subgroup size
+            // nth1 - number of subgroups per workgroup
+            // ndst - number of output values per workgroup = output per subgroup * number of subgroups
+            if (backend_ctx->gpu_family == INTEL) {
+                nth0 = 16;
+                nth1 = 2;
+                ndst = nth1*4;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                nth0 = 64;
+                nth1 = 2;
+                ndst = nth1*4;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb13));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne1));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2));
+            CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3));
+#else
+            kernel = backend_ctx->kernel_mul_mv_q8_0_f32;
+
+            // nth0 - subgroup size
+            // nth1 - number of subgroups per workgroup
+            // ndst - number of output values per workgroup = output per subgroup * number of subgroups
+            if (backend_ctx->gpu_family == INTEL) {
+                nth0 = 16;
+                nth1 = 2;
+                ndst = nth1*4;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                nth0 = 64;
+                nth1 = 2;
+                ndst = nth1*4;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(int), &ne01));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb01));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb02));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb03));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb13));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne1));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r2));
+            CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &r3));
+#endif // GGML_OPENCL_SOA_Q
+            break;
+        }
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
|
@@ -7115,6 +7413,7 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
|
|
|
#ifdef GGML_OPENCL_SOA_Q
|
|
#ifdef GGML_OPENCL_SOA_Q
|
|
|
ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
|
|
ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
|
|
|
ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
|
|
ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
|
|
|
|
|
+ ggml_tensor_extra_cl_q8_0 * extra0_q8_0 = (ggml_tensor_extra_cl_q8_0 *)src0->extra;
|
|
|
#endif
|
|
#endif
|
|
|
|
|
|
|
|
const int ne00 = src0->ne[0];
|
|
const int ne00 = src0->ne[0];
|
|
@@ -7202,6 +7501,82 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
 
             break;
         }
+        case GGML_TYPE_Q8_0: {
+#ifdef GGML_OPENCL_SOA_Q
+            kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32_flat;
+
+            if (backend_ctx->gpu_family == INTEL) {
+                sgs = 16;
+                nsg = 2;
+                ndst = 4;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                sgs = 64;
+                nsg = 2;
+                ndst = 4;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0_q8_0->q));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_q8_0->d));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21));
+            CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21));
+            CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1));
+#else
+            kernel = backend_ctx->kernel_mul_mv_id_q8_0_f32;
+
+            if (backend_ctx->gpu_family == INTEL) {
+                sgs = 16;
+                nsg = 2;
+                ndst = 4;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                sgs = 64;
+                nsg = 2;
+                ndst = 4;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(int), &ne01));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb01));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb02));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &ne20));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne21));
+            CL_CHECK(clSetKernelArg(kernel, 18, sizeof(cl_ulong), &nb21));
+            CL_CHECK(clSetKernelArg(kernel, 19, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne1));
+#endif // GGML_OPENCL_SOA_Q
+            break;
+        }
         case GGML_TYPE_MXFP4: {
 #ifdef GGML_OPENCL_SOA_Q
             kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat;