From f36d7b2d598953800d30c80b405d03f7bbd6abd7 Mon Sep 17 00:00:00 2001
From: "Chen, Zhentao"
Date: Mon, 13 Nov 2023 15:29:53 +0800
Subject: [PATCH] Fix harness stuck (#9435)

* remove env to avoid being stuck

* use small model for test
---
 .github/workflows/llm-harness-evaluation.yml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/llm-harness-evaluation.yml b/.github/workflows/llm-harness-evaluation.yml
index b91371db..fe0e0f4f 100644
--- a/.github/workflows/llm-harness-evaluation.yml
+++ b/.github/workflows/llm-harness-evaluation.yml
@@ -26,10 +26,10 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ["3.9"]
-        model_name: ["Llama-2-7b-chat-hf"]
+        model_name: [stablelm-3b-4e1t]
         task: ["truthfulqa"]
         precision: ["int4"]
-    runs-on: [self-hosted, llm, accuracy]
+    runs-on: [self-hosted, llm, accuracy, temp-arc01]
     env:
       ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
     steps:
@@ -95,7 +95,6 @@ jobs:
         shell: bash
         run: |
           export USE_XETLA=OFF
-          export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
           source /opt/intel/oneapi/setvars.sh
           cd python/llm/dev/benchmark/harness
           python llb.py --model bigdl-llm --pretrained ${MODEL_PATH} --precision ${{ matrix.precision }} --device xpu --tasks ${{ matrix.task }} --output_dir results/${{ matrix.model_name }} --batch 1
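
For reference, below is a minimal sketch of how the touched parts of .github/workflows/llm-harness-evaluation.yml read once this patch is applied. It is reconstructed only from the hunks above; the step name "Run harness on xpu" and the exact surrounding layout are assumptions, since the diff shows just the changed lines and a few lines of context.

    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9"]
        model_name: [stablelm-3b-4e1t]                  # smaller model so the test job finishes quickly
        task: ["truthfulqa"]
        precision: ["int4"]
    runs-on: [self-hosted, llm, accuracy, temp-arc01]   # job now pinned to the temp-arc01 runner

    # ... later in the same job, the harness step (name assumed) ...
      - name: Run harness on xpu
        shell: bash
        run: |
          export USE_XETLA=OFF
          # SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 is intentionally no longer
          # exported here; per the commit message, it caused the harness run to get stuck.
          source /opt/intel/oneapi/setvars.sh
          cd python/llm/dev/benchmark/harness
          python llb.py --model bigdl-llm --pretrained ${MODEL_PATH} --precision ${{ matrix.precision }} --device xpu --tasks ${{ matrix.task }} --output_dir results/${{ matrix.model_name }} --batch 1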