From 17a42e5877b02452b52c85497a21c49ecab2197c Mon Sep 17 00:00:00 2001
From: Mathieu Croquelois
Date: Mon, 19 May 2025 05:06:23 +0100
Subject: [PATCH] Add BF16 to GGUF (#2877)

---
 backend/operations_gguf.py       | 1 +
 packages_3rdparty/gguf/quants.py | 3 +++
 2 files changed, 4 insertions(+)

diff --git a/backend/operations_gguf.py b/backend/operations_gguf.py
index f30ef7dd..468e4991 100644
--- a/backend/operations_gguf.py
+++ b/backend/operations_gguf.py
@@ -13,6 +13,7 @@ quants_mapping = {
     gguf.GGMLQuantizationType.Q5_K: gguf.Q5_K,
     gguf.GGMLQuantizationType.Q6_K: gguf.Q6_K,
     gguf.GGMLQuantizationType.Q8_0: gguf.Q8_0,
+    gguf.GGMLQuantizationType.BF16: gguf.BF16,
 }

diff --git a/packages_3rdparty/gguf/quants.py b/packages_3rdparty/gguf/quants.py
index abe52d54..cfd4d21b 100644
--- a/packages_3rdparty/gguf/quants.py
+++ b/packages_3rdparty/gguf/quants.py
@@ -268,6 +268,9 @@ class BF16(__Quant, qtype=GGMLQuantizationType.BF16):
     def dequantize_blocks(cls, blocks: np.ndarray) -> np.ndarray:
         return (blocks.view(np.int16).astype(np.int32) << 16).view(np.float32)

+    @classmethod
+    def dequantize_blocks_pytorch(cls, blocks, block_size, type_size, parameter) -> torch.Tensor:
+        return (blocks.view(torch.int16).to(torch.int32) << 16).view(torch.float32)

 class Q4_0(__Quant, qtype=GGMLQuantizationType.Q4_0):
     @classmethod
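
Note (not part of the patch): a bfloat16 value is just the upper 16 bits of an IEEE-754 float32, which is why both dequantizers need no arithmetic at all; they widen the 16-bit pattern to int32, shift it into the high half, and reinterpret the bits as float32. Below is a minimal, self-contained sketch of the same trick, assuming PyTorch is available; the function and variable names are illustrative and do not come from the repository.

import torch

def bf16_bits_to_float32(raw_bits: torch.Tensor) -> torch.Tensor:
    # raw_bits holds bfloat16 bit patterns as int16 (e.g. a GGUF BF16 block
    # reinterpreted with .view(torch.int16)). Widening to int32 and shifting the
    # pattern into the upper 16 bits reproduces the equivalent float32 bits.
    return (raw_bits.to(torch.int32) << 16).view(torch.float32)

# Round trip: truncate float32 values to bfloat16 bit patterns, then dequantize.
x = torch.tensor([1.0, -2.5, 3.14159], dtype=torch.float32)
bf16_bits = (x.view(torch.int32) >> 16).to(torch.int16)   # keep only the top 16 bits
print(bf16_bits_to_float32(bf16_bits))  # matches x to bfloat16 precision (~3 decimal digits)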