Add BF16 to GGUF (#2877)

This commit is contained in:
Mathieu Croquelois 2025-05-19 05:06:23 +01:00 committed by GitHub
parent 0ced1d0cd0
commit 17a42e5877
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 4 additions and 0 deletions

View File

@@ -13,6 +13,7 @@ quants_mapping = {
gguf.GGMLQuantizationType.Q5_K: gguf.Q5_K,
gguf.GGMLQuantizationType.Q6_K: gguf.Q6_K,
gguf.GGMLQuantizationType.Q8_0: gguf.Q8_0,
gguf.GGMLQuantizationType.BF16: gguf.BF16,
}

View File

@@ -268,6 +268,9 @@ class BF16(__Quant, qtype=GGMLQuantizationType.BF16):
def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
    """Reinterpret bfloat16 bit patterns in *blocks* as float32 values.

    bfloat16 is the top 16 bits of an IEEE-754 float32, so dequantization
    is purely bitwise: widen each 16-bit pattern to 32 bits, shift it into
    the high half, and reinterpret the result as float32.
    """
    # Widen to int32 first so the shift has room for the 16 payload bits.
    widened = blocks.view(np.int16).astype(np.int32)
    shifted = np.left_shift(widened, 16)
    return shifted.view(np.float32)
@classmethod
def dequantize_blocks_pytorch(cls, blocks, block_size, type_size, parameter) -> torch.Tensor:
    """Torch counterpart of dequantize_blocks: expand bf16 bits to float32.

    block_size, type_size and parameter are accepted for interface
    uniformity with the other quant types but are not needed for BF16.
    """
    # Reinterpret the raw 16-bit patterns, widen, and move them into the
    # high half of a 32-bit word; that word IS the float32 representation.
    as_int32 = blocks.view(torch.int16).to(torch.int32)
    return torch.bitwise_left_shift(as_int32, 16).view(torch.float32)
class Q4_0(__Quant, qtype=GGMLQuantizationType.Q4_0):
@classmethod