// element_wise.hpp — element-wise operation declarations for the ggml SYCL backend
  1. #ifndef GGML_SYCL_ELEMENTWISE_HPP
  2. #define GGML_SYCL_ELEMENTWISE_HPP
#include "common.hpp"
#include "ggml.h"

#include <limits.h>
#include <limits>  // std::numeric_limits — required by neg_infinity()
  6. template <typename T>
  7. T neg_infinity() {
  8. return -std::numeric_limits<T>::infinity();
  9. }
  10. static __dpct_inline__ float op_repeat(const float a, const float b) {
  11. return b;
  12. GGML_UNUSED(a);
  13. }
  14. static __dpct_inline__ float op_add(const float a, const float b) {
  15. return a + b;
  16. }
  17. static __dpct_inline__ float op_sub(const float a, const float b) {
  18. return a - b;
  19. }
  20. static __dpct_inline__ float op_mul(const float a, const float b) {
  21. return a * b;
  22. }
  23. static __dpct_inline__ float op_div(const float a, const float b) {
  24. return a / b;
  25. }
// Typed view of a kernel's input/output buffers (see cast_data below).
template<typename T>
struct typed_data {
    const T * src;  // read-only input buffer
    T * dst;        // output buffer
};
  31. template<typename T>
  32. typed_data<T> cast_data(ggml_tensor * dst) {
  33. return {
  34. /* .src = */ static_cast<const T *>(dst->src[0]->data),
  35. /* .dst = */ static_cast<T *>(dst->data)
  36. };
  37. }
// --- Unary / activation element-wise entry points --------------------------
// Each takes the backend context (which owns the SYCL queue) and the
// destination tensor; input operand(s) are reachable via dst->src[]
// (see cast_data above). Implementations live in the corresponding .cpp.
void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);

// --- Binary element-wise entry points --------------------------------------
// Presumably dst = op(src0, src1); the scalar helpers op_add / op_sub /
// op_mul / op_div above define the per-element math.
void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
  64. #endif // GGML_SYCL_ELEMENTWISE_HPP