support batch forward for q4_k, q6_k (#11325)

Ruonan Wang 2024-06-14 18:25:50 +08:00 committed by GitHub
parent e8dd8e97ef
commit 8a3247ac71


@@ -332,7 +332,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
         and output_len % 32 == 0
         and device in ["arc", "flex", "pvc", "mtl"]
         and qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, FP4,
-                      FP8E5, FP6, FP8E4]
+                      FP8E5, FP6, FP8E4, Q4_K, Q6_K]
         and batch_size <= 64
     )
     if hard_condition:
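
For context, this hunk sits inside use_batch_forward, which gates whether a
quantized linear layer may take the optimized batched forward path; the commit
adds the Q4_K and Q6_K quantization types to the allowed list. Below is a
minimal, self-contained sketch of that gate. The qtype ids and the
get_device_type helper are hypothetical stand-ins for the project's real
constants and device probe, and any conditions that fall outside the visible
hunk are omitted.

    import torch

    # Hypothetical qtype ids; the real values live in the project's
    # quantization module and are not shown in this diff.
    SYM_INT4, ASYM_INT4, SYM_INT8 = 0, 1, 2
    FP4, FP8E5, FP6, FP8E4 = 3, 4, 5, 6
    Q4_K, Q6_K = 7, 8  # the two types this commit adds to the fast path

    def get_device_type(x: torch.Tensor) -> str:
        # Stand-in for the real device probe, which reports the GPU
        # family ("arc", "flex", "pvc", "mtl", ...).
        return "arc"

    def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int) -> bool:
        batch_size = x.shape[0]
        device = get_device_type(x)
        hard_condition = (
            output_len % 32 == 0                  # output width must be 32-aligned
            and device in ["arc", "flex", "pvc", "mtl"]
            and qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, FP4,
                          FP8E5, FP6, FP8E4, Q4_K, Q6_K]
            and batch_size <= 64                  # batched kernel handles up to 64 rows
        )
        return hard_condition

With Q4_K and Q6_K in the list, inputs quantized with either K-quant format can
now satisfy hard_condition and take the batched forward path, provided the
other constraints (supported device family, output alignment, batch size) still
hold.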