From 75f836f288341d998f539aa4ed5cf2ba29b399b7 Mon Sep 17 00:00:00 2001
From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com>
Date: Mon, 24 Jun 2024 18:08:05 +0800
Subject: [PATCH] Add extra warmup for THUDM/glm-4-9b-chat in igpu-performance
 test (#11417)

---
 .github/workflows/llm_performance_tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml
index 57ab09b0..cddbb526 100644
--- a/.github/workflows/llm_performance_tests.yml
+++ b/.github/workflows/llm_performance_tests.yml
@@ -562,7 +562,7 @@ jobs:
         shell: bash
         run: |
           sed -i '/^\s*result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)/ i\
-            if repo_id in ["THUDM/chatglm3-6b"]:\
+            if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat"]:\
                 run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
           ' python/llm/dev/benchmark/all-in-one/run.py
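
For reference, the sed command above patches python/llm/dev/benchmark/all-in-one/run.py at test time so that, for the listed models, the int4 GPU benchmark helper is invoked one extra time before the call whose results are recorded, giving those models additional warmup. Below is a minimal Python sketch of the resulting control flow; run_transformer_int4_gpu_win and benchmark here are hypothetical stand-ins for illustration, not the real code in run.py.

def run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs,
                                 warm_up, num_trials, num_beams, low_bit,
                                 cpu_embedding, batch_size, streaming):
    # Stand-in for the real benchmark helper: imagine it loads the model,
    # runs warm_up untimed iterations, then num_trials timed iterations.
    print(f"benchmarking {repo_id} (warm_up={warm_up}, num_trials={num_trials})")
    return []

def benchmark(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials,
              num_beams, low_bit, cpu_embedding, batch_size, streaming):
    # Lines inserted by the sed command: a full extra benchmark pass whose
    # output is discarded, serving purely as additional warmup for these models.
    if repo_id in ["THUDM/chatglm3-6b", "THUDM/glm-4-9b-chat"]:
        run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs,
                                     warm_up, num_trials, num_beams, low_bit,
                                     cpu_embedding, batch_size, streaming)
    # Original call whose results are kept.
    result = run_transformer_int4_gpu_win(repo_id, local_model_hub, in_out_pairs,
                                          warm_up, num_trials, num_beams, low_bit,
                                          cpu_embedding, batch_size, streaming)
    return result

if __name__ == "__main__":
    # Hypothetical invocation with placeholder arguments.
    benchmark("THUDM/glm-4-9b-chat", "models", [("1024", "128")],
              warm_up=1, num_trials=3, num_beams=1, low_bit="sym_int4",
              cpu_embedding=False, batch_size=1, streaming=False)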