element_wise.hpp

#ifndef GGML_SYCL_ELEMENTWISE_HPP
#define GGML_SYCL_ELEMENTWISE_HPP

#include "common.hpp"
#include "ggml.h"

#include <limits> // For std::numeric_limits

template <typename T>
T neg_infinity() {
    return -std::numeric_limits<T>::infinity();
}
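
// Usage sketch (illustrative only): neg_infinity<T>() yields a -infinity of the
// requested floating-point type, e.g. as the identity value for a running max:
//
//   float running_max = neg_infinity<float>();   // == -inf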

template<typename T_Dst, typename T_Src = T_Dst>
struct typed_data {
    const T_Src * src;
    T_Dst * dst;
};

template<typename T_Dst, typename T_Src = T_Dst>
typed_data<T_Dst, T_Src> cast_data(ggml_tensor * dst) {
    return {
        /* .src = */ static_cast<const T_Src *>(dst->src[0]->data),
        /* .dst = */ static_cast<T_Dst *>(dst->data)
    };
}
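
// Usage sketch (illustrative only): cast_data pairs the raw data pointers of an
// op's input (dst->src[0]) and output (dst) as typed pointers for a kernel,
// here assuming both tensors hold float data:
//
//   auto data = cast_data<float>(dst);   // data.src : const float *, data.dst : float *
//   // ... enqueue a SYCL kernel that reads data.src[i] and writes data.dst[i]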

// Negated coefficient of the sigmoid-based GELU approximation
// gelu_quick(x) ≈ x * sigmoid(1.702 * x) = x / (1 + exp(-1.702 * x)).
const float GELU_QUICK_COEF = -1.702f;

void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
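
// Usage sketch (illustrative only, not the backend's actual dispatch code):
// each entry point takes the backend context plus the destination tensor and
// reads its input(s) from dst->src[]. A caller would typically select the
// function from the tensor's op, e.g.:
//
//   switch (dst->op) {
//       case GGML_OP_SQRT: ggml_sycl_sqrt(ctx, dst); break;
//       case GGML_OP_SIN:  ggml_sycl_sin(ctx, dst);  break;
//       // ...
//       default: break;
//   }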

#endif // GGML_SYCL_ELEMENTWISE_HPP