@@ -85,6 +85,22 @@ static __device__ __forceinline__ float op_elu(float x) {
     return (x > 0.f) ? x : expm1f(x);
 }
 
+static __device__ __forceinline__ float op_floor(float x) {
+    return floorf(x);
+}
+
+static __device__ __forceinline__ float op_ceil(float x) {
+    return ceilf(x);
+}
+
+static __device__ __forceinline__ float op_round(float x) {
+    return roundf(x);
+}
+
+static __device__ __forceinline__ float op_trunc(float x) {
+    return truncf(x);
+}
+
 template <float (*op)(float), typename T>
 static __global__ void unary_op_kernel(const T * x, T * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
@@ -201,6 +217,22 @@ void ggml_cuda_op_log(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
 void ggml_cuda_op_elu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     ggml_cuda_op_unary<op_elu>(ctx, dst);
 }
+
+void ggml_cuda_op_floor(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    ggml_cuda_op_unary<op_floor>(ctx, dst);
+}
+
+void ggml_cuda_op_ceil(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    ggml_cuda_op_unary<op_ceil>(ctx, dst);
+}
+
+void ggml_cuda_op_round(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    ggml_cuda_op_unary<op_round>(ctx, dst);
+}
+
+void ggml_cuda_op_trunc(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    ggml_cuda_op_unary<op_trunc>(ctx, dst);
+}
 /* gated ops */
 
 template <float (*op)(float), typename T>
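
For reference, the new device ops stay one-liners because the dispatch lives in the templated unary_op_kernel shown above: the op is passed as a compile-time function-pointer template parameter and inlined into each instantiation. The standalone sketch below mirrors that pattern so the rounding ops can be exercised with plain nvcc; the kernel body past the index computation and the test_unary/main harness are illustrative assumptions, not code from this patch.

// Standalone sketch (illustrative, not part of the patch): mirrors the
// template-dispatch pattern used by unary_op_kernel so the new rounding ops
// can be tested with plain nvcc. test_unary and main are assumed helpers.
#include <cstdio>
#include <cuda_runtime.h>

static __device__ __forceinline__ float op_floor(float x) { return floorf(x); }
static __device__ __forceinline__ float op_round(float x) { return roundf(x); }

// The op is a compile-time function-pointer template parameter, so each
// instantiation inlines it directly into the kernel body.
template <float (*op)(float), typename T>
static __global__ void unary_op_kernel(const T * x, T * dst, const int k) {
    const int i = blockDim.x*blockIdx.x + threadIdx.x;
    if (i >= k) {
        return;
    }
    dst[i] = (T)op((float)x[i]);
}

// Copies k floats to the device, applies op element-wise, copies the result back.
template <float (*op)(float)>
static void test_unary(const float * src, float * out, const int k) {
    float * d_x   = nullptr;
    float * d_dst = nullptr;
    cudaMalloc((void **) &d_x,   k*sizeof(float));
    cudaMalloc((void **) &d_dst, k*sizeof(float));
    cudaMemcpy(d_x, src, k*sizeof(float), cudaMemcpyHostToDevice);

    const int block = 256;
    unary_op_kernel<op><<<(k + block - 1)/block, block>>>(d_x, d_dst, k);

    cudaMemcpy(out, d_dst, k*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_dst);
}

int main() {
    const float x[4] = {-1.5f, -0.4f, 0.5f, 2.7f};
    float floored[4];
    float rounded[4];
    test_unary<op_floor>(x, floored, 4);
    test_unary<op_round>(x, rounded, 4);
    for (int i = 0; i < 4; ++i) {
        printf("x=%5.2f  floor=%5.2f  round=%5.2f\n", x[i], floored[i], rounded[i]);
    }
    return 0;
}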