support batch forward for q4_k, q6_k (#11325)
parent e8dd8e97ef
commit 8a3247ac71
1 changed file with 1 addition and 1 deletion
@@ -332,7 +332,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
         and output_len % 32 == 0
         and device in ["arc", "flex", "pvc", "mtl"]
         and qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, FP4,
-                      FP8E5, FP6, FP8E4]
+                      FP8E5, FP6, FP8E4, Q4_K, Q6_K]
         and batch_size <= 64
     )
     if hard_condition:
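For context, here is a minimal sketch of how the gating logic reads after this change. Only the clauses visible in the hunk are from the commit; the leading dtype clause, the placeholder constant values, and passing `device` as a parameter (the real code derives it from `x`) are assumptions made to keep the sketch self-contained.

import torch

# Placeholder qtype constants for the sketch only; the real values come from
# the library's quantization module.
SYM_INT4, ASYM_INT4, SYM_INT8, FP4, FP8E5, FP6, FP8E4, Q4_K, Q6_K = range(9)

def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int,
                      device: str = "arc") -> bool:
    batch_size = x.shape[0]
    hard_condition = (
        x.dtype in [torch.float, torch.float16]  # assumed leading clause
        and output_len % 32 == 0
        and device in ["arc", "flex", "pvc", "mtl"]
        and qtype in [SYM_INT4, ASYM_INT4, SYM_INT8, FP4,
                      FP8E5, FP6, FP8E4, Q4_K, Q6_K]  # Q4_K/Q6_K added here
        and batch_size <= 64
    )
    if hard_condition:
        # The real function continues with further checks here (elided).
        return True
    return False

Under these assumptions, a Q4_K-quantized layer now takes the batch-forward path for a small batch: with x = torch.randn(4, 4096, dtype=torch.float16), use_batch_forward(x, Q4_K, output_len=4096) returns True, since the batch size of 4 is at most 64 and 4096 is a multiple of 32.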