@@ -398,6 +398,7 @@ struct ggml_backend_opencl_context {
     cl_kernel kernel_scale;
     cl_kernel kernel_silu, kernel_silu_4;
     cl_kernel kernel_gelu, kernel_gelu_4;
+    cl_kernel kernel_gelu_erf, kernel_gelu_erf_4;
     cl_kernel kernel_gelu_quick, kernel_gelu_quick_4;
     cl_kernel kernel_relu;
     cl_kernel kernel_sigmoid_f32, kernel_sigmoid_f16;
@@ -736,6 +737,8 @@ static void load_cl_kernels(ggml_backend_opencl_context *backend_ctx, ggml_cl_ve
 
         CL_CHECK((backend_ctx->kernel_gelu = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu", &err), err));
         CL_CHECK((backend_ctx->kernel_gelu_4 = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_4", &err), err));
+        CL_CHECK((backend_ctx->kernel_gelu_erf = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_erf", &err), err));
+        CL_CHECK((backend_ctx->kernel_gelu_erf_4 = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_erf_4", &err), err));
         CL_CHECK((backend_ctx->kernel_gelu_quick = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_quick", &err), err));
         CL_CHECK((backend_ctx->kernel_gelu_quick_4 = clCreateKernel(backend_ctx->program_gelu, "kernel_gelu_quick_4", &err), err));
         GGML_LOG_CONT(".");
@@ -2266,6 +2269,7 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te
                 case GGML_UNARY_OP_GELU:
                 case GGML_UNARY_OP_SILU:
                 case GGML_UNARY_OP_RELU:
+                case GGML_UNARY_OP_GELU_ERF:
                 case GGML_UNARY_OP_GELU_QUICK:
                     return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
                 case GGML_UNARY_OP_SIGMOID:
@@ -3870,6 +3874,44 @@ static void ggml_cl_gelu(ggml_backend_t backend, const ggml_tensor * src0, const
     backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
 }
 
+static void ggml_cl_gelu_erf(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    GGML_ASSERT(src0);
+    GGML_ASSERT(src0->extra);
+    GGML_ASSERT(dst);
+    GGML_ASSERT(dst->extra);
+
+    UNUSED(src1);
+
+    ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
+
+    ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
+    ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
+
+    cl_ulong offset0 = extra0->offset + src0->view_offs;
+    cl_ulong offsetd = extrad->offset + dst->view_offs;
+
+    cl_kernel kernel;
+
+    int n = ggml_nelements(dst);
+
+    if (n % 4 == 0) {
+        kernel = backend_ctx->kernel_gelu_erf_4;
+        n /= 4;
+    } else {
+        kernel = backend_ctx->kernel_gelu_erf;
+    }
+
+    CL_CHECK(clSetKernelArg(kernel, 0, sizeof(cl_mem), &extra0->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 1, sizeof(cl_ulong), &offset0));
+    CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
+    CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
+
+    size_t global_work_size[] = {(size_t)n, 1, 1};
+    size_t local_work_size[] = {64, 1, 1};
+
+    backend_ctx->enqueue_ndrange_kernel(kernel, 3, global_work_size, local_work_size, dst);
+}
+
 static void ggml_cl_gelu_quick(ggml_backend_t backend, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     GGML_ASSERT(src0);
     GGML_ASSERT(src0->extra);
@@ -6388,6 +6430,12 @@ bool ggml_cl_compute_forward(ggml_backend_t backend, struct ggml_tensor * tensor
                     }
                     func = ggml_cl_gelu;
                     break;
+                case GGML_UNARY_OP_GELU_ERF:
+                    if (!any_on_device) {
+                        return false;
+                    }
+                    func = ggml_cl_gelu_erf;
+                    break;
                 case GGML_UNARY_OP_GELU_QUICK:
                     if (!any_on_device) {
                         return false;
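
Note: the two new kernels are created from the existing program_gelu, so the matching device code has to live in the gelu OpenCL source, which is not part of the hunks above. Below is only a hedged sketch of what that device side could look like, assuming the same buffer-plus-byte-offset argument layout that ggml_cl_gelu_erf sets with clSetKernelArg and the exact (erf-based) GELU, 0.5 * x * (1 + erf(x / sqrt(2))):

// Sketch only (not taken from this diff): assumed bodies for kernel_gelu_erf
// and kernel_gelu_erf_4, following the argument order used by the host code
// above (src buffer, byte offset, dst buffer, byte offset).

#define SQRT_2_INV 0.70710678118654752440f

kernel void kernel_gelu_erf(
    global float * src0,
    ulong          offset0,
    global float * dst,
    ulong          offsetd
) {
    src0 = (global float*)((global char*)src0 + offset0);
    dst  = (global float*)((global char*)dst  + offsetd);

    // exact GELU: 0.5 * x * (1 + erf(x / sqrt(2)))
    float x = src0[get_global_id(0)];
    dst[get_global_id(0)] = 0.5f*x*(1.0f + erf(x*SQRT_2_INV));
}

kernel void kernel_gelu_erf_4(
    global float4 * src0,
    ulong           offset0,
    global float4 * dst,
    ulong           offsetd
) {
    src0 = (global float4*)((global char*)src0 + offset0);
    dst  = (global float4*)((global char*)dst  + offsetd);

    // vectorized variant: four elements per work item, which is why the
    // host code divides n by 4 when n % 4 == 0
    float4 x = src0[get_global_id(0)];
    dst[get_global_id(0)] = 0.5f*x*(1.0f + erf(x*SQRT_2_INV));
}

The host-side dispatch in ggml_cl_gelu_erf mirrors ggml_cl_gelu: it picks the _4 variant whenever the element count is divisible by four and otherwise falls back to the scalar kernel.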