quants.py

from __future__ import annotations
from typing import Callable, Sequence

from numpy.typing import DTypeLike

from .constants import GGML_QUANT_SIZES, GGMLQuantizationType
from .lazy import LazyNumpyTensor

import numpy as np


# Convert a tensor shape counted in elements to the shape of its raw quantized
# bytes, and back. Both helpers only rescale the last dimension.
def quant_shape_to_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    if shape[-1] % block_size != 0:
        raise ValueError(f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})")
    return (*shape[:-1], shape[-1] // block_size * type_size)


def quant_shape_from_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    if shape[-1] % type_size != 0:
        raise ValueError(f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})")
    return (*shape[:-1], shape[-1] // type_size * block_size)
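
# Worked example (a sketch added for illustration; assumes
# GGML_QUANT_SIZES[GGMLQuantizationType.Q8_0] == (32, 34), i.e. 32 elements
# per block stored in 34 bytes):
#
#   quant_shape_to_byte_shape((4096, 64), GGMLQuantizationType.Q8_0)
#   # -> (4096, 68): 64 elements = 2 blocks = 2 * 34 bytes per row
#   quant_shape_from_byte_shape((4096, 68), GGMLQuantizationType.Q8_0)
#   # -> (4096, 64): the two helpers are inverses on valid shapes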


# same as ggml_compute_fp32_to_bf16 in ggml-impl.h
def __compute_fp32_to_bf16(n: np.ndarray) -> np.ndarray:
    n = n.astype(np.float32, copy=False).view(np.int32)
    # force nan to quiet
    n = np.where((n & 0x7fffffff) > 0x7f800000, (n & 0xffff0000) | (64 << 16), n)
    # flush subnormals to zero
    n = np.where((n & 0x7f800000) == 0, n & 0x80000000, n)
    # round to nearest even
    n = (n + (0x7fff + ((n >> 16) & 1))) >> 16
    return n.astype(np.int16)
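
# Rounding sketch (added for illustration, not from ggml): keeping only the
# high 16 bits of a float32 would truncate, so a bias is added first. For bits
# 0x3F808000 (exactly halfway between bf16 0x3F80 and 0x3F81) the low bit of
# the target is 0, the bias is 0x7fff, and the value rounds down to the even
# 0x3F80; for 0x3F818000 the low bit is 1, the bias is 0x8000, and it rounds
# up to the even 0x3F82.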


# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time
def __apply_over_grouped_rows(func: Callable[[np.ndarray], np.ndarray], arr: np.ndarray, otype: DTypeLike, oshape: tuple[int, ...]) -> np.ndarray:
    rows = arr.reshape((-1, arr.shape[-1]))
    osize = 1
    for dim in oshape:
        osize *= dim
    out = np.empty(shape=osize, dtype=otype)
    # compute over groups of 16 rows (arbitrary, but seems good for performance)
    # (the `or 1` keeps np.array_split valid when there are fewer than 16 rows)
    n_groups = (rows.shape[0] // 16) or 1
    np.concatenate([func(group).ravel() for group in np.array_split(rows, n_groups)], axis=0, out=out)
    return out.reshape(oshape)
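
# Grouping sketch (illustrative): for 100 rows, n_groups is 6 and
# np.array_split hands func four groups of 17 rows and two of 16, so each
# call still vectorizes over many rows instead of one at a time.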


def __quantize_bf16_array(n: np.ndarray) -> np.ndarray:
    return __apply_over_grouped_rows(__compute_fp32_to_bf16, arr=n, otype=np.int16, oshape=n.shape)


__quantize_bf16_lazy = LazyNumpyTensor._wrap_fn(__quantize_bf16_array, meta_noop=np.int16)


def quantize_bf16(n: np.ndarray):
    if type(n) is LazyNumpyTensor:
        return __quantize_bf16_lazy(n)
    else:
        return __quantize_bf16_array(n)
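
# Usage sketch (added for illustration): quantize_bf16 preserves the input
# shape and returns the raw bf16 bit patterns as int16:
#
#   quantize_bf16(np.array([1.0, -2.0], dtype=np.float32))
#   # -> array([ 16256, -16384], dtype=int16), i.e. bits 0x3F80 and 0xC000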


__q8_block_size, __q8_type_size = GGML_QUANT_SIZES[GGMLQuantizationType.Q8_0]


def can_quantize_to_q8_0(n: np.ndarray) -> bool:
    return n.shape[-1] % __q8_block_size == 0


# round away from zero
# ref: https://stackoverflow.com/a/59143326/22827863
def np_roundf(n: np.ndarray) -> np.ndarray:
    a = abs(n)
    floored = np.floor(a)
    b = floored + np.floor(2 * (a - floored))
    return np.sign(n) * b
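
# Example (added for illustration): np.round rounds half-way cases to even
# ("banker's rounding"), which would not match the C roundf() used by ggml:
#
#   np_roundf(np.array([-2.5, -0.5, 0.5, 2.5]))  # -> [-3., -1.,  1.,  3.]
#   np.round(np.array([-2.5, -0.5, 0.5, 2.5]))   # -> [-2., -0.,  0.,  2.]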


def __quantize_q8_0_shape_change(s: tuple[int, ...]) -> tuple[int, ...]:
    return (*s[:-1], s[-1] // __q8_block_size * __q8_type_size)


# Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c
def __quantize_q8_0_rows(n: np.ndarray) -> np.ndarray:
    shape = n.shape
    assert shape[-1] % __q8_block_size == 0

    n_blocks = n.size // __q8_block_size

    blocks = n.reshape((n_blocks, __q8_block_size)).astype(np.float32, copy=False)

    d = abs(blocks).max(axis=1, keepdims=True) / 127
    with np.errstate(divide="ignore"):
        id = np.where(d == 0, 0, 1 / d)
    qs = np_roundf(blocks * id)

    # (n_blocks, 2)
    d = d.astype(np.float16).view(np.uint8)
    # (n_blocks, block_size)
    qs = qs.astype(np.int8).view(np.uint8)

    assert d.shape[1] + qs.shape[1] == __q8_type_size

    return np.concatenate([d, qs], axis=1).reshape(__quantize_q8_0_shape_change(shape))
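
# Resulting byte layout per block, matching block_q8_0 in ggml (34 bytes,
# assuming the usual Q8_0 sizes of block_size == 32 and type_size == 34):
#
#   bytes 0..1   d  (float16 scale, max(|x|) / 127)
#   bytes 2..33  qs (32 int8 quants, x * (1/d) rounded away from zero)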


def __quantize_q8_0_array(n: np.ndarray) -> np.ndarray:
    return __apply_over_grouped_rows(__quantize_q8_0_rows, arr=n, otype=np.uint8, oshape=__quantize_q8_0_shape_change(n.shape))


__quantize_q8_0_lazy = LazyNumpyTensor._wrap_fn(
    __quantize_q8_0_array,
    meta_noop=(np.uint8, __quantize_q8_0_shape_change),
)


def quantize_q8_0(data: np.ndarray):
    if type(data) is LazyNumpyTensor:
        return __quantize_q8_0_lazy(data)
    else:
        return __quantize_q8_0_array(data)
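

# Minimal self-test sketch (added for illustration, not part of the original
# module; run it as `python -m gguf.quants` or similar so the relative imports
# resolve; the exact module path is an assumption):
if __name__ == "__main__":
    data = np.random.rand(4, 64).astype(np.float32)
    assert can_quantize_to_q8_0(data)
    q = quantize_q8_0(data)
    print(q.dtype, q.shape)  # uint8 (4, 68): 2 blocks * 34 bytes per row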