#ifndef GGML_SYCL_ELEMENTWISE_HPP
#define GGML_SYCL_ELEMENTWISE_HPP

#include "common.hpp"
#include "ggml.h"

#include <limits>

// Helper: negative infinity for the given floating-point type.
template <typename T>
T neg_infinity() {
    return -std::numeric_limits<T>::infinity();
}

// Binary operator used for the repeat op: the result is simply the second
// (broadcast) operand; a is intentionally unused.
static __dpct_inline__ float op_repeat(const float a, const float b) {
    return b;
    GGML_UNUSED(a);
}

// Element-wise binary operators.
static __dpct_inline__ float op_add(const float a, const float b) {
    return a + b;
}

static __dpct_inline__ float op_sub(const float a, const float b) {
    return a - b;
}

static __dpct_inline__ float op_mul(const float a, const float b) {
    return a * b;
}

static __dpct_inline__ float op_div(const float a, const float b) {
    return a / b;
}

// Typed views of the (first) source buffer and the destination buffer of a
// tensor operation.
template<typename T>
struct typed_data {
    const T * src;
    T * dst;
};

// Casts dst->src[0]->data and dst->data to the requested element type.
template<typename T>
typed_data<T> cast_data(ggml_tensor * dst) {
    return {
        /* .src = */ static_cast<const T *>(dst->src[0]->data),
        /* .dst = */ static_cast<T *>(dst->data)
    };
}
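
// Usage sketch (illustrative only, not part of the interface): a launcher for
// an F32 unary op could obtain its typed pointers as
//
//     auto data = cast_data<float>(dst); // data.src = dst->src[0]->data, data.dst = dst->data
//
// and then launch a SYCL kernel that reads data.src and writes data.dst.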

// Forward declarations of the element-wise operations implemented by the SYCL
// backend. Each entry point reads its input(s) from dst->src[...] and writes
// the result into dst.
void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);

// ---------
// Binary operations: dst = dst->src[0] OP dst->src[1].
void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
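
// Usage sketch (hypothetical call site; the actual dispatch lives outside this
// header): the backend's graph-compute loop is expected to invoke these with
// the backend context and the node to evaluate, e.g.
//
//     ggml_sycl_add(ctx, node); // node->src[0] + node->src[1] -> node->data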

#endif // GGML_SYCL_ELEMENTWISE_HPP