// op_add.comp — element-wise tensor addition compute shader (Vulkan GLSL)
  1. #version 450
  2. #include "common.comp"
  3. layout(local_size_x = 1024) in;
  4. layout(binding = 0) buffer restrict readonly tensorInA { float inA[]; };
  5. layout(binding = 1) buffer restrict readonly tensorInB { float inB[]; };
  6. layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };
  7. layout(push_constant) uniform PushConstants {
  8. uint inAOff;
  9. uint inBOff;
  10. uint outOff;
  11. int ne00;
  12. int nb00;
  13. int nb01;
  14. int nb02;
  15. int nb03;
  16. int ne10;
  17. int ne11;
  18. int ne12;
  19. int ne13;
  20. int nb10;
  21. int nb11;
  22. int nb12;
  23. int nb13;
  24. int ne0;
  25. int nb0;
  26. int nb1;
  27. int nb2;
  28. int nb3;
  29. //int offs; // TODO: needed for GGML_OP_ACC, see metal code
  30. } pcs;
  31. // general-purpose kernel for addition of two tensors
  32. // pros: works for non-contiguous tensors, supports broadcast across dims 1, 2 and 3
  33. // cons: not very efficient
  34. void main() {
  35. const uint i03 = gl_WorkGroupID.z;
  36. const uint i02 = gl_WorkGroupID.y;
  37. const uint i01 = gl_WorkGroupID.x;
  38. const uint i13 = i03 % pcs.ne13;
  39. const uint i12 = i02 % pcs.ne12;
  40. const uint i11 = i01 % pcs.ne11;
  41. int offs = 0; // TMP (see above)
  42. uint src0_off = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + offs) / 4);
  43. uint src1_off = uint((i13*pcs.nb13 + i12*pcs.nb12 + i11*pcs.nb11 ) / 4);
  44. uint dst_off = uint((i03*pcs.nb3 + i02*pcs.nb2 + i01*pcs.nb1 + offs) / 4);
  45. for (uint i0 = gl_LocalInvocationID.x; i0 < pcs.ne0; i0 += gl_WorkGroupSize.x) {
  46. const uint i10 = i0 % pcs.ne10;
  47. out_[pcs.outOff + dst_off + i0] = inA[pcs.inAOff + src0_off + i0] + inB[pcs.inBOff + src1_off + i10];
  48. }
  49. }