@@ -83,6 +83,10 @@ static __device__ __forceinline__ float op_log(float x) {
     return logf(x);
 }
 
+static __device__ __forceinline__ float op_elu(float x) {
+    return (x > 0.f) ? x : expm1f(x);
+}
+
 template <float (*op)(float), typename T>
 static __global__ void unary_op_kernel(const T * x, T * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
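
Note on the device function added above: this is ELU with alpha = 1, passing positive inputs through unchanged and mapping the rest to exp(x) - 1. Using expm1f(x) rather than expf(x) - 1.0f preserves precision for inputs near zero, where the explicit subtraction would cancel.
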
@@ -196,6 +200,9 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     ggml_cuda_op_unary<op_log>(ctx, dst);
 }
 
+void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    ggml_cuda_op_unary<op_elu>(ctx, dst);
+}
 /* gated ops */
 
 template <float (*op)(float), typename T>
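
Since the new op follows the file's existing unary pattern (a __device__ scalar function dispatched through the templated kernel), it can be spot-checked in isolation. Below is a minimal, hypothetical harness that is not part of the patch: it copies the op_elu definition and launches it through a simplified float-only kernel, whereas the real code path goes through ggml_cuda_op_unary and the unary_op_kernel shown in the first hunk. The file name elu_check.cu and the test values are illustrative only.

// elu_check.cu -- hypothetical spot-check harness, not part of the patch.
// Build: nvcc -o elu_check elu_check.cu
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

// same definition as in the patch: ELU with alpha = 1
static __device__ __forceinline__ float op_elu(float x) {
    return (x > 0.f) ? x : expm1f(x);
}

// simplified float-only stand-in for the templated unary_op_kernel
static __global__ void elu_kernel(const float * x, float * dst, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i >= k) {
        return;
    }
    dst[i] = op_elu(x[i]);
}

int main() {
    const int k = 5;
    const float h_x[k] = {-2.0f, -1e-4f, 0.0f, 1e-4f, 3.0f};
    float h_dst[k];

    float * d_x; float * d_dst;
    cudaMalloc(&d_x,   k*sizeof(float));
    cudaMalloc(&d_dst, k*sizeof(float));
    cudaMemcpy(d_x, h_x, k*sizeof(float), cudaMemcpyHostToDevice);

    // one block of 32 threads is enough to cover k = 5 elements
    elu_kernel<<<1, 32>>>(d_x, d_dst, k);
    cudaMemcpy(h_dst, d_dst, k*sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < k; ++i) {
        // host reference: x for x > 0, exp(x) - 1 otherwise
        const float ref = h_x[i] > 0.f ? h_x[i] : std::expm1f(h_x[i]);
        printf("elu(%g) = %g (expected %g)\n", h_x[i], h_dst[i], ref);
    }

    cudaFree(d_x);
    cudaFree(d_dst);
    return 0;
}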