diff --git a/python/llm/src/ipex_llm/ggml/quantize.py b/python/llm/src/ipex_llm/ggml/quantize.py
index a95e3464e32..f86ee12245f 100644
--- a/python/llm/src/ipex_llm/ggml/quantize.py
+++ b/python/llm/src/ipex_llm/ggml/quantize.py
@@ -53,6 +53,7 @@
                      "sym_int4_rtn": 31,
                      "sym_int8_rtn": 32,
                      "asym_int4_rtn": 33,
+                     "woq_int4": 34,
                      }
 
 # mixed precison from llama.cpp
diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index 78e10c2c7ac..f78a5168aec 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -84,6 +84,7 @@
 SYM_INT4_RTN = ggml_tensor_qtype["sym_int4_rtn"]
 SYM_INT8_RTN = ggml_tensor_qtype["sym_int8_rtn"]
 ASYM_INT4_RTN = ggml_tensor_qtype["asym_int4_rtn"]
+WOQ_INT4 = ggml_tensor_qtype["woq_int4"]
 RTN_DTYPE = {
     SYM_INT4_RTN: torch.uint8,
     ASYM_INT4_RTN: torch.uint8,
@@ -187,7 +188,7 @@ def ggml_q_format_convet_cpu2xpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
     if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
-                 Q4_K, Q6_K, FP6_K]:
+                 Q4_K, Q6_K, FP6_K, WOQ_INT4]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(qtype)
@@ -213,7 +214,7 @@ def ggml_q_format_convet_xpu2cpu(tensor: torch.Tensor, num_elem: int, qtype: int
     src = ctypes.c_void_p(tensor.data.data_ptr())
 
     if qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, NF4, NF3, FP4, FP6, FP8E4, FP8E5,
-                 Q4_K, Q6_K, FP6_K]:
+                 Q4_K, Q6_K, FP6_K, WOQ_INT4]:
         dst_tensor = torch.empty_like(tensor)
     elif qtype == ggml_tensor_qtype["sym_int5"]:
         QK = ggml.ggml_qk_size(ggml_tensor_qtype["asym_int5"])
diff --git a/python/llm/src/ipex_llm/transformers/models/utils.py b/python/llm/src/ipex_llm/transformers/models/utils.py
index 3e92146ae3d..9cde0af4056 100644
--- a/python/llm/src/ipex_llm/transformers/models/utils.py
+++ b/python/llm/src/ipex_llm/transformers/models/utils.py
@@ -21,7 +21,7 @@
 from ipex_llm.ggml.quantize import ggml_tensor_qtype
 from ipex_llm.transformers.utils import get_xpu_device_name
 from ipex_llm.transformers.low_bit_linear import SYM_INT4, SYM_INT8, FP8E5, IQ2_XXS, FP4, FP8E4,\
-    FP6, ASYM_INT4
+    FP6, ASYM_INT4, WOQ_INT4
 
 FP8_KV_ALLOC_LENGTH = 512
 KV_CACHE_ALLOC_BLOCK_LENGTH = int(os.environ.get("KV_CACHE_ALLOC_BLOCK_LENGTH", 256))
@@ -33,7 +33,7 @@ def decoding_fast_path_qtype_check(proj):
     qtype = getattr(proj, "qtype", None)
-    return qtype in [SYM_INT4, FP8E5, FP4]
+    return qtype in [SYM_INT4, FP8E5, FP4, WOQ_INT4]
 
 
 def init_kv_cache(batch_size, num_heads, head_dim, current_length, max_length, dtype, device):
@@ -248,7 +248,7 @@ def mlp_fusion_check(x, qtype, training):
         return False
     if x.device.type != 'xpu':
         return False
-    if qtype not in [SYM_INT4, FP8E5, FP4, IQ2_XXS, FP6]:
+    if qtype not in [SYM_INT4, FP8E5, FP4, IQ2_XXS, FP6, WOQ_INT4]:
         return False
     if training or x.requires_grad:
         return False
@@ -263,7 +263,7 @@ def use_xmx(x: torch.Tensor, qtype: int):
     device = get_xpu_device_name(x.device)
     return (
         device in ["arc", "pvc"]
-        and qtype in [SYM_INT4, SYM_INT8, FP8E4, FP8E5]
+        and qtype in [SYM_INT4, SYM_INT8, FP8E4, FP8E5, WOQ_INT4]
         and (
             (device == "pvc" and 1 < x.size(0) <= 16) or
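
Note (not part of the patch): a minimal usage sketch of the new qtype follows. It assumes the "woq_int4" key added to ggml_tensor_qtype is exposed through load_in_low_bit in the same way as the existing keys; the model id below is only a placeholder.

# Hypothetical usage sketch of the new "woq_int4" low-bit option.
# Assumption: load_in_low_bit accepts the keys of ggml_tensor_qtype,
# so "woq_int4" would select the qtype value 34 registered above.
from ipex_llm.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",   # placeholder model id
    load_in_low_bit="woq_int4",
    trust_remote_code=True,
)
# The new qtype is wired into the XPU code paths above
# (ggml_q_format_convet_*, use_xmx, mlp_fusion_check), so move the model to XPU.
model = model.half().to("xpu")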