@@ -368,6 +368,7 @@ struct ggml_backend_opencl_context {
     cl_program program_mul_mv_q4_0_f32_1d_16x_flat;
     cl_program program_mul_mv_q6_K;
     cl_program program_mul_mv_mxfp4_f32;
+    cl_program program_mul_mv_mxfp4_f32_flat;
     cl_program program_mul_mv_f16_f16;
     cl_program program_mul_mv_f16_f32_1row;
     cl_program program_mul_mv_f16_f32_l4;
@@ -402,6 +403,7 @@ struct ggml_backend_opencl_context {
     cl_program program_tsembd;
     cl_program program_mul_mv_id_q4_0_f32_8x_flat;
     cl_program program_mul_mv_id_mxfp4_f32;
+    cl_program program_mul_mv_id_mxfp4_f32_flat;
     cl_program program_mul_mm_f32_f32_l4_lm;
     cl_program program_mul_mm_f16_f32_l4_lm;
 
@@ -447,11 +449,12 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_mul_mat_f16_f32_tiled;
     cl_kernel kernel_mul_mat_q4_0_f32, kernel_mul_mat_q4_0_f32_v;
     cl_kernel kernel_convert_block_q4_0, kernel_restore_block_q4_0;
+    cl_kernel kernel_convert_block_mxfp4, kernel_restore_block_mxfp4;
     cl_kernel kernel_mul_mat_q4_0_f32_8x_flat;
     cl_kernel kernel_convert_block_q4_0_noshuffle;
     cl_kernel kernel_mul_mat_q4_0_f32_1d_8x_flat, kernel_mul_mat_q4_0_f32_1d_16x_flat;
     cl_kernel kernel_mul_mv_q6_K_f32;
-    cl_kernel kernel_mul_mv_mxfp4_f32;
+    cl_kernel kernel_mul_mv_mxfp4_f32, kernel_mul_mv_mxfp4_f32_flat;
     cl_kernel kernel_im2col_f32, kernel_im2col_f16;
     cl_kernel kernel_argsort_f32_i32;
     cl_kernel kernel_sum_rows_f32;
@@ -469,6 +472,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_timestep_embedding;
     cl_kernel kernel_mul_mv_id_q4_0_f32_8x_flat;
     cl_kernel kernel_mul_mv_id_mxfp4_f32;
+    cl_kernel kernel_mul_mv_id_mxfp4_f32_flat;
     cl_kernel kernel_mul_mm_f32_f32_l4_lm;
     cl_kernel kernel_mul_mm_f16_f32_l4_lm;
 
@@ -765,6 +769,8 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         CL_CHECK((backend_ctx->kernel_convert_block_q4_0_noshuffle = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0_noshuffle", &err), err));
         CL_CHECK((backend_ctx->kernel_convert_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_q4_0", &err), err));
         CL_CHECK((backend_ctx->kernel_restore_block_q4_0 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_q4_0", &err), err));
+        CL_CHECK((backend_ctx->kernel_convert_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_convert_block_mxfp4", &err), err));
+        CL_CHECK((backend_ctx->kernel_restore_block_mxfp4 = clCreateKernel(backend_ctx->program_cvt, "kernel_restore_block_mxfp4", &err), err));
         GGML_LOG_CONT(".");
     }
 
@@ -1002,6 +1008,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         GGML_LOG_CONT(".");
     }
 
+    // mul_mv_mxfp4_f32_flat
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "mul_mv_mxfp4_f32_flat.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("mul_mv_mxfp4_f32_flat.cl");
+#endif
+        backend_ctx->program_mul_mv_mxfp4_f32_flat =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_mul_mv_mxfp4_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_mxfp4_f32_flat, "kernel_mul_mv_mxfp4_f32_flat", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
     // mul_mv_f16_f16
     {
 #ifdef GGML_OPENCL_EMBED_KERNELS
@@ -1727,6 +1749,22 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
         GGML_LOG_CONT(".");
     }
 
+    // mul_mv_id_mxfp4_f32_flat
+    {
+#ifdef GGML_OPENCL_EMBED_KERNELS
+        const std::string kernel_src {
+            #include "mul_mv_id_mxfp4_f32_flat.cl.h"
+        };
+#else
+        const std::string kernel_src = read_file("mul_mv_id_mxfp4_f32_flat.cl");
+#endif
+        backend_ctx->program_mul_mv_id_mxfp4_f32_flat =
+            build_program_from_source(backend_ctx->context, backend_ctx->device, kernel_src.c_str(), compile_opts);
+
+        CL_CHECK((backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat = clCreateKernel(backend_ctx->program_mul_mv_id_mxfp4_f32_flat, "kernel_mul_mv_id_mxfp4_f32_flat", &err), err));
+        GGML_LOG_CONT(".");
+    }
+
     // Adreno kernels
 #ifdef GGML_OPENCL_USE_ADRENO_KERNELS
     // transpose
@@ -2391,6 +2429,51 @@ struct ggml_tensor_extra_cl_q4_0 {
     }
 };
 
+struct ggml_tensor_extra_cl_mxfp4 {
+    // Quantized values.
+    cl_mem q = nullptr;
+    // Quantized values in image1d_buffer_t.
+    cl_mem q_img = nullptr;
+    // Scales in E8M0.
+    cl_mem e = nullptr;
+    // Scales in image1d_buffer_t.
+    cl_mem e_img = nullptr;
+    // Size of quantized values.
+    size_t size_q = 0;
+    // Size of scales.
+    size_t size_e = 0;
+
+    ~ggml_tensor_extra_cl_mxfp4() {
+        reset();
+    }
+
+    void reset() {
+        // q and e are subbuffers into the bigger buffer allocated in ggml_backend_buffer.
+        // They must be properly released so that the original buffer can be
+        // properly released to avoid memory leak.
+        if (q != nullptr) {
+            CL_CHECK(clReleaseMemObject(q));
+            q = nullptr;
+        }
+        if (e != nullptr) {
+            CL_CHECK(clReleaseMemObject(e));
+            e = nullptr;
+        }
+        if (q_img != nullptr) {
+            CL_CHECK(clReleaseMemObject(q_img));
+            q_img = nullptr;
+        }
+        // Currently, q_img and e_img are only initialized when SMALL_ALLOC is
+        // enabled. They point to the images in ggml_backend_opencl_buffer_context.
+        // So, there is no need to release them here.
+        // TODO: initialize them for non SMALL_PATH path, or remove them.
+        q_img = nullptr;
+        e_img = nullptr;
+        size_q = 0;
+        size_e = 0;
+    }
+};
+
 //------------------------------------------------------------------------------
 // Backend API
 //------------------------------------------------------------------------------
@@ -2894,6 +2977,12 @@ struct ggml_backend_opencl_buffer_context {
         for (ggml_tensor_extra_cl_q4_0 * e : temp_tensor_extras_q4_0_in_use) {
             delete e;
         }
+        for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4) {
+            delete e;
+        }
+        for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) {
+            delete e;
+        }
     }
 
     ggml_tensor_extra_cl * ggml_opencl_alloc_temp_tensor_extra() {
@@ -2926,6 +3015,21 @@ struct ggml_backend_opencl_buffer_context {
         return extra;
     }
 
+    ggml_tensor_extra_cl_mxfp4 * ggml_opencl_alloc_temp_tensor_extra_mxfp4() {
+        ggml_tensor_extra_cl_mxfp4 * extra;
+        if (temp_tensor_extras_mxfp4.empty()) {
+            extra = new ggml_tensor_extra_cl_mxfp4();
+        } else {
+            extra = temp_tensor_extras_mxfp4.back();
+            temp_tensor_extras_mxfp4.pop_back();
+        }
+
+        temp_tensor_extras_mxfp4_in_use.push_back(extra);
+
+        extra->reset();
+        return extra;
+    }
+
     void reset() {
         for (ggml_tensor_extra_cl * e : temp_tensor_extras_in_use) {
             temp_tensor_extras.push_back(e);
@@ -2936,6 +3040,11 @@ struct ggml_backend_opencl_buffer_context {
             temp_tensor_extras_q4_0.push_back(e);
         }
         temp_tensor_extras_q4_0_in_use.clear();
+
+        for (ggml_tensor_extra_cl_mxfp4 * e : temp_tensor_extras_mxfp4_in_use) {
+            temp_tensor_extras_mxfp4.push_back(e);
+        }
+        temp_tensor_extras_mxfp4_in_use.clear();
     }
 
     // Pools for extras. Available extras are in `temp_tensor_extras`. Extras
@@ -2947,6 +3056,8 @@ struct ggml_backend_opencl_buffer_context {
     std::vector<ggml_tensor_extra_cl *> temp_tensor_extras_in_use;
     std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0;
    std::vector<ggml_tensor_extra_cl_q4_0 *> temp_tensor_extras_q4_0_in_use;
+    std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4;
+    std::vector<ggml_tensor_extra_cl_mxfp4 *> temp_tensor_extras_mxfp4_in_use;
 
     // The buffer_context is initially created by ggml_backend_buft_alloc_buffer
     // before any tensor is initialized (at the beginning of alloc_tensor_range).
@@ -3289,6 +3400,76 @@ static void ggml_backend_opencl_buffer_set_tensor(ggml_backend_buffer_t buffer,
         }
 #endif // GGML_OPENCL_USE_ADRENO_KERNELS
 
+        return;
+
+    }
+    if (tensor->type == GGML_TYPE_MXFP4) {
+        ggml_tensor_extra_cl * extra_orig = (ggml_tensor_extra_cl *)tensor->extra;
+        GGML_ASSERT(extra_orig && "Tensors in OpenCL backend should have been allocated and initialized");
+
+        // Allocate the new extra and create aliases from the original.
+        ggml_backend_opencl_buffer_context * ctx = (ggml_backend_opencl_buffer_context *) buffer->context;
+        ggml_tensor_extra_cl_mxfp4 * extra = ctx->ggml_opencl_alloc_temp_tensor_extra_mxfp4();
+
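+        // Sizes follow the block_mxfp4 layout: one E8M0 scale byte per 32-element
+        // block (size_e) and 32 packed 4-bit values, i.e. 16 bytes, per block (size_q).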
+        size_t size_e = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*sizeof(char);
+        size_t size_q = ggml_nelements(tensor)/ggml_blck_size(tensor->type)*ggml_blck_size(tensor->type)/2;
+        GGML_ASSERT(size_e + size_q == ggml_nbytes(tensor) && "Incorrect tensor size");
+
+        cl_int err;
+        cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
+            ggml_nbytes(tensor), NULL, &err);
+        CL_CHECK(err);
+        CL_CHECK(clEnqueueWriteBuffer(
+            queue, data_device, CL_TRUE, 0,
+            ggml_nbytes(tensor), data, 0, NULL, NULL));
+
+        // The original tensor memory is divided into scales and quants, i.e.,
+        // we first store scales, then quants.
+        cl_buffer_region region;
+
+        // Create subbuffer for scales.
+        region.origin = align_to(extra_orig->offset + tensor->view_offs + offset, backend_ctx->alignment);
+        region.size = size_e;
+        extra->e = clCreateSubBuffer(
+            extra_orig->data_device, CL_MEM_READ_WRITE,
+            CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+        auto previous_origin = region.origin;
+
+        // Create subbuffer for quants.
+        region.origin = align_to(previous_origin + size_e, backend_ctx->alignment);
+        region.size = size_q;
+        extra->q = clCreateSubBuffer(
+            extra_orig->data_device, CL_MEM_READ_WRITE,
+            CL_BUFFER_CREATE_TYPE_REGION, &region, &err);
+        CL_CHECK(err);
+
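+        // kernel_convert_block_mxfp4 deinterleaves the uploaded blocks into the
+        // q (quants) and e (scales) subbuffers; the ND-range below launches one
+        // work-item per block.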
+        cl_kernel kernel = backend_ctx->kernel_convert_block_mxfp4;
+
+        CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &data_device));
+        CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->q));
+        CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra->e));
+
+        size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+        size_t local_work_size[] = {64, 1, 1};
+
+        cl_event evt;
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL, global_work_size, local_work_size, 0, NULL, &evt));
+        CL_CHECK(clWaitForEvents(1, &evt));
+        CL_CHECK(clReleaseMemObject(data_device));
+
+        // Create image for Q
+        cl_image_format img_format_q = {CL_RG, CL_UNSIGNED_INT32};
+        cl_image_desc img_desc_q = {
+            CL_MEM_OBJECT_IMAGE1D_BUFFER,
+            static_cast<size_t>(ggml_nelements(tensor)/32*2),
+            0, 0, 0, 0, 0, 0, 0,
+            { extra->q }
+        };
+        extra->q_img = clCreateImage(context, CL_MEM_READ_ONLY, &img_format_q, &img_desc_q, NULL, &err);
+
+        tensor->extra = extra;
+
         return;
     }
 #endif // GGML_OPENCL_SOA_Q
@@ -3337,6 +3518,31 @@ static void ggml_backend_opencl_buffer_get_tensor(ggml_backend_buffer_t buffer,
         size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
         size_t local_work_size[] = {1, 1, 1};
 
+        cl_event evt;
+        CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
+            global_work_size, local_work_size, 0, NULL, &evt));
+        CL_CHECK(clWaitForEvents(1, &evt));
+        CL_CHECK(clEnqueueReadBuffer(
+            queue, data_device, CL_TRUE, offset,
+            size, data, 0, NULL, NULL));
+        CL_CHECK(clReleaseMemObject(data_device));
+        return;
+    } else if (tensor->type == GGML_TYPE_MXFP4) {
+        ggml_tensor_extra_cl_mxfp4 * extra = (ggml_tensor_extra_cl_mxfp4 *)tensor->extra;
+
+        cl_int err;
+        cl_mem data_device = clCreateBuffer(context, CL_MEM_READ_WRITE,
+            ggml_nbytes(tensor), NULL, &err);
+        CL_CHECK(err);
+
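+        // kernel_restore_block_mxfp4 re-interleaves the q/e subbuffers back into
+        // the standard block_mxfp4 layout so the host reads back plain GGML data.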
+        cl_kernel kernel = backend_ctx->kernel_restore_block_mxfp4;
+        CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra->q));
+        CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra->e));
+        CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &data_device));
+
+        size_t global_work_size[] = {(size_t)ggml_nelements(tensor)/ggml_blck_size(tensor->type), 1, 1};
+        size_t local_work_size[] = {1, 1, 1};
+
         cl_event evt;
         CL_CHECK(clEnqueueNDRangeKernel(queue, kernel, 3, NULL,
             global_work_size, local_work_size, 0, NULL, &evt));
@@ -3658,6 +3864,19 @@ static void dump_tensor(ggml_backend_t backend, const struct ggml_tensor * tenso
         CL_CHECK(clEnqueueReadBuffer(queue, extra->q, CL_TRUE, 0, size_q, buf_q, 0, NULL, NULL));
         CL_CHECK(clEnqueueReadBuffer(queue, extra->d, CL_TRUE, 0, size_d, buf_d, 0, NULL, NULL));
         CL_CHECK(clFinish(queue));
+    } else if (tensor->type == GGML_TYPE_MXFP4) {
+        ggml_tensor_extra_cl_mxfp4 * extra = (ggml_tensor_extra_cl_mxfp4 *) tensor->extra;
+        GGML_ASSERT(extra);
+
+        size_t size_q = ggml_nelements(tensor)/QK_MXFP4 * QK_MXFP4/2;
+        size_t size_e = ggml_nelements(tensor)/QK_MXFP4 * sizeof(char);
+        GGML_ASSERT(size_q + size_e == ggml_nbytes(tensor));
+        buf_q = malloc(size_q);
+        buf_d = malloc(size_e);
+
+        CL_CHECK(clEnqueueReadBuffer(queue, extra->q, CL_TRUE, 0, size_q, buf_q, 0, NULL, NULL));
+        CL_CHECK(clEnqueueReadBuffer(queue, extra->e, CL_TRUE, 0, size_e, buf_d, 0, NULL, NULL));
+        CL_CHECK(clFinish(queue));
     } else {
         // Read out the tensor from GPU memory.
         ggml_tensor_extra_cl * extra = (ggml_tensor_extra_cl *) tensor->extra;
@@ -6048,6 +6267,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
 
 #ifdef GGML_OPENCL_SOA_Q
     ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
+    ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
 #endif
 
     const int ne00 = src0 ? src0->ne[0] : 0;
@@ -6752,6 +6972,45 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
             CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &r3));
             break;
         case GGML_TYPE_MXFP4: {
+#ifdef GGML_OPENCL_SOA_Q
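+            // Flat path: quants and E8M0 scales live in the separate q/e buffers
+            // created at set_tensor time; on Adreno the quants are read through
+            // the image1d_buffer_t alias (q_img).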
+            kernel = backend_ctx->kernel_mul_mv_mxfp4_f32_flat;
+
+            cl_mem q;
+            if (backend_ctx->gpu_family == INTEL) {
+                nth0 = 16;
+                nth1 = 2;
+                ndst = nth1*2;
+
+                q = extra0_mxfp4->q;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                nth0 = 64;
+                nth1 = 2;
+                ndst = nth1*2;
+
+                q = extra0_mxfp4->q_img;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_mxfp4->e));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &nb01));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(cl_ulong), &nb02));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb03));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(cl_ulong), &nb13));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(int), &ne1));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &r2));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r3));
+#else
             kernel = backend_ctx->kernel_mul_mv_mxfp4_f32;
 
             if (backend_ctx->gpu_family == INTEL) {
@@ -6785,6 +7044,7 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co
             CL_CHECK(clSetKernelArg(kernel, 16, sizeof(int), &r2));
             CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &r3));
             CL_CHECK(clSetKernelArg(kernel, 18, sizeof(float)*nth0,nullptr));
+#endif
             break;
         }
         default:
@@ -6850,8 +7110,11 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
     cl_ulong offset2 = extra2->offset + src2->view_offs;
     cl_ulong offsetd = extrad->offset + dst->view_offs;
 
+    GGML_UNUSED(offset0);
+
 #ifdef GGML_OPENCL_SOA_Q
     ggml_tensor_extra_cl_q4_0 * extra0_q4_0 = (ggml_tensor_extra_cl_q4_0 *)src0->extra;
+    ggml_tensor_extra_cl_mxfp4 * extra0_mxfp4 = (ggml_tensor_extra_cl_mxfp4 *)src0->extra;
 #endif
 
     const int ne00 = src0->ne[0];
@@ -6940,6 +7203,51 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
             break;
         }
         case GGML_TYPE_MXFP4: {
+#ifdef GGML_OPENCL_SOA_Q
+            kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32_flat;
+
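+            // Same flat q/e layout as the non-id path; launch parameters (sgs,
+            // nsg, ndst) are chosen per vendor, and Adreno again reads quants
+            // via the q_img image alias.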
+            cl_mem q;
+            if (backend_ctx->gpu_family == INTEL) {
+                sgs = 16;
+                nsg = 2;
+                ndst = 2;
+
+                q = extra0_mxfp4->q;
+            } else if (backend_ctx->gpu_family == ADRENO) {
+                sgs = 64;
+                nsg = 1;
+                ndst = 4;
+
+                q = extra0_mxfp4->q_img;
+            } else {
+                GGML_ASSERT(false && "TODO: Unknown GPU");
+            }
+
+            CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &q));
+            CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_mem), &extra0_mxfp4->e));
+            CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extra1->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offset1));
+            CL_CHECK(clSetKernelArg(kernel, 4, sizeof(cl_mem), &extra2->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 5, sizeof(cl_ulong), &offset2));
+            CL_CHECK(clSetKernelArg(kernel, 6, sizeof(cl_mem), &extrad->data_device));
+            CL_CHECK(clSetKernelArg(kernel, 7, sizeof(cl_ulong), &offsetd));
+            CL_CHECK(clSetKernelArg(kernel, 8, sizeof(int), &ne00));
+            CL_CHECK(clSetKernelArg(kernel, 9, sizeof(cl_ulong), &nb01));
+            CL_CHECK(clSetKernelArg(kernel, 10, sizeof(cl_ulong), &nb02));
+            CL_CHECK(clSetKernelArg(kernel, 11, sizeof(cl_ulong), &nb03));
+            CL_CHECK(clSetKernelArg(kernel, 12, sizeof(int), &ne11));
+            CL_CHECK(clSetKernelArg(kernel, 13, sizeof(int), &ne12));
+            CL_CHECK(clSetKernelArg(kernel, 14, sizeof(cl_ulong), &nb11));
+            CL_CHECK(clSetKernelArg(kernel, 15, sizeof(cl_ulong), &nb12));
+            CL_CHECK(clSetKernelArg(kernel, 16, sizeof(cl_ulong), &nb13));
+            CL_CHECK(clSetKernelArg(kernel, 17, sizeof(int), &ne20));
+            CL_CHECK(clSetKernelArg(kernel, 18, sizeof(int), &ne21));
+            CL_CHECK(clSetKernelArg(kernel, 19, sizeof(cl_ulong), &nb21));
+            CL_CHECK(clSetKernelArg(kernel, 20, sizeof(int), &ne0));
+            CL_CHECK(clSetKernelArg(kernel, 21, sizeof(int), &ne1));
+            CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2));
+            CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3));
+#else // GGML_OPENCL_SOA_Q
             kernel = backend_ctx->kernel_mul_mv_id_mxfp4_f32;
 
             if (backend_ctx->gpu_family == INTEL) {
@@ -6979,7 +7287,7 @@ static void ggml_cl_mul_mat_id(ggml_backend_t backend, const ggml_tensor * src0,
             CL_CHECK(clSetKernelArg(kernel, 22, sizeof(int), &r2));
             CL_CHECK(clSetKernelArg(kernel, 23, sizeof(int), &r3));
             CL_CHECK(clSetKernelArg(kernel, 24, sizeof(float)*sgs,nullptr));
-
+#endif // GGML_OPENCL_SOA_Q
             break;
         }
         default: