Enable use_batch_forward Optimization on Battlemage GPU (#12516)

* Update get_xpu_device_type() to support bmg

* enable use_batch_forward for bmg

* Update low_bit_linear.py

* Update utils.py

* use batch kernel for fp8e5
Shaojun Liu, 2024-12-12 12:44:36 +08:00 (committed by GitHub)
parent 6fc27da9c1
commit 2cce89691a
2 changed files with 3 additions and 0 deletions

low_bit_linear.py

@@ -405,6 +405,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
         or (device in ["arc", "flex"] and qtype in [SYM_INT8, FP4])
         or (device in ["arc", "flex", "mtl"] and qtype in [FP8E4])
         or (device in ["lnl"] and qtype in [SYM_INT4] and x.shape[1] % 512 == 0)
+        or (device in ["bmg"] and qtype in [SYM_INT4, FP8E5])
     )
     return False
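
For readers outside the codebase, here is a minimal self-contained sketch of the whitelist this hunk extends. It is not the actual ipex-llm implementation: the qtype tags, the helper name batch_kernel_allowed, and the hidden_size parameter are placeholders standing in for the real constants and the x.shape[1] check.

# Placeholder qtype tags; the real code uses integer constants
# (SYM_INT4, FP8E4, FP8E5, ...) from ipex-llm's quantization module.
SYM_INT4, FP8E4, FP8E5 = "sym_int4", "fp8_e4m3", "fp8_e5m2"

def batch_kernel_allowed(device: str, qtype: str, hidden_size: int) -> bool:
    """Simplified whitelist: may this device/qtype pair use the batch kernel?"""
    if device in ("arc", "flex", "mtl") and qtype == FP8E4:
        return True
    if device == "lnl" and qtype == SYM_INT4 and hidden_size % 512 == 0:
        return True
    # New in this commit: Battlemage ("bmg") opts in for SYM_INT4 and FP8E5.
    if device == "bmg" and qtype in (SYM_INT4, FP8E5):
        return True
    return False

# Example: fp8e5-quantized weights on a Battlemage GPU now take the batched path.
assert batch_kernel_allowed("bmg", FP8E5, 4096)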

utils.py

@@ -174,6 +174,8 @@ def get_xpu_device_type(x):
     name = torch.xpu.get_device_name(x.device.index)
     if name.startswith("Intel(R) Arc(TM) A"):
         return "arc"
+    elif name.startswith("Intel(R) Graphics [0xe20b]"):
+        return "bmg"
     elif name.startswith("Intel(R) Arc(TM)"):
         if 'V' in name:
             return "lnl"