diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index 25d3b608..7168e587 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -372,8 +372,12 @@ def run_transformer_int4_gpu(repo_id,
         tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
     else:
-        model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
-                                                     trust_remote_code=True, use_cache=True).eval()
+        if 'starcoder' in repo_id:
+            model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                         trust_remote_code=True, use_cache=True, torch_dtype=torch.bfloat16).eval()
+        else:
+            model = AutoModelForCausalLM.from_pretrained(model_path, optimize_model=True, load_in_low_bit=low_bit,
+                                                         trust_remote_code=True, use_cache=True).eval()
         tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
         model = model.to('xpu')
         if isinstance(model, GPTJForCausalLM):
diff --git a/python/llm/test/benchmark/arc-perf-test.yaml b/python/llm/test/benchmark/arc-perf-test.yaml
index c5235419..a5fbe22f 100644
--- a/python/llm/test/benchmark/arc-perf-test.yaml
+++ b/python/llm/test/benchmark/arc-perf-test.yaml
@@ -5,7 +5,7 @@ repo_id:
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'
   - 'redpajama/gptneox-7b-redpajama-bf16'
-  # - 'bigcode/starcoder-15.5b'
+  - 'bigcode/starcoder-15.5b'
   - 'databricks/dolly-v1-6b'
   - 'databricks/dolly-v2-7b'
   - 'databricks/dolly-v2-12b'