mmq.cu

#include "mmq.cuh"

void ggml_cuda_op_mul_mat_q(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream) {

    const int64_t ne00 = src0->ne[0];

    const int64_t nb01 = src0->nb[1];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff = row_high - row_low;
    const int64_t stride00 = nb01 / ggml_type_size(src0->type);

    int id = ggml_cuda_get_device();
    const int compute_capability = ggml_cuda_info().devices[id].cc;

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the kernel writes into
    const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;

    const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst};

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            mul_mat_q_case<GGML_TYPE_Q4_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_1:
            mul_mat_q_case<GGML_TYPE_Q4_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_0:
            mul_mat_q_case<GGML_TYPE_Q5_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_1:
            mul_mat_q_case<GGML_TYPE_Q5_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q8_0:
            mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q2_K:
            mul_mat_q_case<GGML_TYPE_Q2_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q3_K:
            mul_mat_q_case<GGML_TYPE_Q3_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_K:
            mul_mat_q_case<GGML_TYPE_Q4_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_K:
            mul_mat_q_case<GGML_TYPE_Q5_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q6_K:
            mul_mat_q_case<GGML_TYPE_Q6_K>(ctx, args, stream);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    GGML_UNUSED(src1);
    GGML_UNUSED(dst);
    GGML_UNUSED(src1_ddf_i);
}
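
// Heuristic for deciding between the MMQ kernels and the cuBLAS-based path,
// summarizing the logic below:
//  - only the quantized types listed in the switch are supported by MMQ;
//  - GGML_CUDA_FORCE_CUBLAS disables MMQ entirely, GGML_CUDA_FORCE_MMQ forces
//    it whenever DP4A is available;
//  - if int8 MMA (tensor core) support is available, MMQ is always used;
//  - otherwise DP4A is required, and on NVIDIA >= Volta or AMD >= RDNA3 the
//    DP4A path is only used for batch sizes ne11 < MMQ_DP4A_MAX_BATCH_SIZE.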
bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
#ifdef GGML_CUDA_FORCE_CUBLAS
    return false;
#endif // GGML_CUDA_FORCE_CUBLAS

    bool mmq_supported;

    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            mmq_supported = true;
            break;
        default:
            mmq_supported = false;
            break;
    }

    if (!mmq_supported) {
        return false;
    }

    if (int8_mma_available(cc)) {
        return true;
    }

    if (cc < MIN_CC_DP4A) {
        return false;
    }

#ifdef GGML_CUDA_FORCE_MMQ
    return true;
#endif //GGML_CUDA_FORCE_MMQ

    if (cc < CC_OFFSET_AMD) {
        return cc < CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
    }

    return cc < CC_RDNA3 || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}
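
// Usage sketch (illustrative only, not part of this file): a host-side caller
// would typically query the device's compute capability, ask
// ggml_cuda_should_use_mmq() whether the MMQ path applies to the given tensors,
// and only then quantize src1 to q8_1 and invoke ggml_cuda_op_mul_mat_q();
// otherwise it would fall back to a GEMM-based path. The control flow below is
// an assumption for the sketch, not the dispatcher's actual code:
//
//     const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
//     if (ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1])) {
//         // ... quantize src1 to q8_1, then call ggml_cuda_op_mul_mat_q(...)
//     } else {
//         // ... dequantize/convert src0 and use the cuBLAS GEMM path
//     }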