mmq.cu

#include "mmq.cuh"

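// multiplies rows [row_low, row_high) of the quantized src0 with src1 (provided pre-quantized
// to q8_1 in src1_ddq_i) and writes the float result to dst_dd_i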
void ggml_cuda_op_mul_mat_q(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream) {

    const int64_t ne00 = src0->ne[0];
    const int64_t nb01 = src0->nb[1];

    const int64_t ne10 = src1->ne[0];
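    // src1 is consumed in q8_1 format, so its row length must be a whole number of q8_1 blocks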
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff  = row_high - row_low;
    const int64_t stride00  = nb01 / ggml_type_size(src0->type);

    int id = ggml_cuda_get_device();
    const int compute_capability = ggml_cuda_info().devices[id].cc;

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the kernel writes into
    const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;

    const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, nrows_dst};
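
    // dispatch to the mul_mat_q kernel instantiation that matches the quantization type of src0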
    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            mul_mat_q_case<GGML_TYPE_Q4_0>(args, stream);
            break;
        case GGML_TYPE_Q4_1:
            mul_mat_q_case<GGML_TYPE_Q4_1>(args, stream);
            break;
        case GGML_TYPE_Q5_0:
            mul_mat_q_case<GGML_TYPE_Q5_0>(args, stream);
            break;
        case GGML_TYPE_Q5_1:
            mul_mat_q_case<GGML_TYPE_Q5_1>(args, stream);
            break;
        case GGML_TYPE_Q8_0:
            mul_mat_q_case<GGML_TYPE_Q8_0>(args, stream);
            break;
        case GGML_TYPE_Q2_K:
            mul_mat_q_case<GGML_TYPE_Q2_K>(args, stream);
            break;
        case GGML_TYPE_Q3_K:
            mul_mat_q_case<GGML_TYPE_Q3_K>(args, stream);
            break;
        case GGML_TYPE_Q4_K:
            mul_mat_q_case<GGML_TYPE_Q4_K>(args, stream);
            break;
        case GGML_TYPE_Q5_K:
            mul_mat_q_case<GGML_TYPE_Q5_K>(args, stream);
            break;
        case GGML_TYPE_Q6_K:
            mul_mat_q_case<GGML_TYPE_Q6_K>(args, stream);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

    GGML_UNUSED(src1);
    GGML_UNUSED(dst);
    GGML_UNUSED(src1_ddf_i);
}
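
// returns whether the MMQ kernels above have an instantiation for the given quantization type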
bool ggml_cuda_supports_mmq(enum ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            return true;
        default:
            return false;
    }
}