small fix and add comment (#12670)
This commit is contained in:
parent
ccf618ff4a
commit
7dd156d292
2 changed files with 3 additions and 1 deletion
|
|
@ -286,7 +286,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
|
|||
or (
|
||||
qtype in [SYM_INT8, FP4, FP6, Q4_K, Q6_K]
|
||||
and batch_size <= 48
|
||||
and device_name in ["arc", "pvc", "mtl", "lnl", "arl"]
|
||||
and device_name in ["arc", "pvc", "mtl", "arl"]
|
||||
and x.shape[1] % 256 == 0
|
||||
and output_len % 32 == 0
|
||||
)
|
||||
|
|
|
|||
|
|
@ -172,6 +172,8 @@ def get_xpu_device_name(device: torch.device):
|
|||
if device.type != "xpu":
|
||||
return device.type
|
||||
else:
|
||||
# possible device names:
|
||||
# ["arc", "pvc", "mtl", "lnl", "bmg", "arl", "legacy", "unknown"]
|
||||
import xe_linear
|
||||
return xe_linear.get_xpu_device_name(device)
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue