Migrate harness to ipexllm (#10703)
* migrate to ipexlm * fix workflow * fix run_multi * fix precision map * rename ipexlm to ipexllm * rename bigdl to ipex in comments
This commit is contained in:
parent
8cf26d8d08
commit
d59e0cce5c
5 changed files with 9 additions and 8 deletions
2
.github/workflows/llm-harness-evaluation.yml
vendored
2
.github/workflows/llm-harness-evaluation.yml
vendored
|
|
@@ -189,7 +189,7 @@ jobs:
|
|||
fi
|
||||
|
||||
python run_llb.py \
|
||||
--model bigdl-llm \
|
||||
--model ipex-llm \
|
||||
--pretrained ${MODEL_PATH} \
|
||||
--precision ${{ matrix.precision }} \
|
||||
--device ${{ matrix.device }} \
|
||||
|
|
|
|||
|
|
@@ -48,7 +48,7 @@ task_to_metric = dict(
|
|||
drop='f1'
|
||||
)
|
||||
|
||||
def parse_precision(precision, model="bigdl-llm"):
|
||||
def parse_precision(precision, model="ipex-llm"):
|
||||
result = match(r"([a-zA-Z_]+)(\d+)([a-zA-Z_\d]*)", precision)
|
||||
datatype = result.group(1)
|
||||
bit = int(result.group(2))
|
||||
|
|
@@ -62,6 +62,6 @@ def parse_precision(precision, model="bigdl-llm"):
|
|||
else:
|
||||
if model == "hf-causal":
|
||||
return f"bnb_type={precision}"
|
||||
if model == "bigdl-llm":
|
||||
if model == "ipex-llm":
|
||||
return f"load_in_low_bit={precision}"
|
||||
raise RuntimeError(f"invald precision {precision}")
|
||||
|
|
|
|||
|
|
@@ -35,7 +35,7 @@ def force_decrease_order(Reorderer):
|
|||
utils.Reorderer = force_decrease_order(utils.Reorderer)
|
||||
|
||||
|
||||
class BigDLLM(AutoCausalLM):
|
||||
class IPEXLLM(AutoCausalLM):
|
||||
AUTO_MODEL_CLASS = AutoModelForCausalLM
|
||||
AutoCausalLM_ARGS = inspect.getfullargspec(AutoCausalLM.__init__).args
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
|
@@ -20,8 +20,8 @@ import os
|
|||
from harness_to_leaderboard import *
|
||||
from lm_eval import tasks, evaluator, utils, models
|
||||
|
||||
from bigdl_llm import BigDLLM
|
||||
models.MODEL_REGISTRY['bigdl-llm'] = BigDLLM # patch bigdl-llm to harness
|
||||
from ipexllm import IPEXLLM
|
||||
models.MODEL_REGISTRY['ipex-llm'] = IPEXLLM # patch ipex-llm to harness
|
||||
|
||||
logging.getLogger("openai").setLevel(logging.WARNING)
|
||||
|
||||
|
|
|
|||
|
|
@@ -22,8 +22,9 @@ from lm_eval import tasks, evaluator, utils, models
|
|||
from multiprocessing import Queue, Process
|
||||
import multiprocessing as mp
|
||||
from contextlib import redirect_stdout, redirect_stderr
|
||||
from bigdl_llm import BigDLLM
|
||||
models.MODEL_REGISTRY['bigdl-llm'] = BigDLLM # patch bigdl-llm to harness
|
||||
|
||||
from ipexllm import IPEXLLM
|
||||
models.MODEL_REGISTRY['ipex-llm'] = IPEXLLM # patch ipex-llm to harness
|
||||
|
||||
logging.getLogger("openai").setLevel(logging.WARNING)
|
||||
|
||||
|
|
|
|||
Loading…
Reference in a new issue