From 5dbbe1a82643d8bb3e39a8e59ebfb13fb596d625 Mon Sep 17 00:00:00 2001
From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com>
Date: Mon, 4 Mar 2024 18:42:02 +0800
Subject: [PATCH] [LLM] Support for new arc ut runner (#10311)

* Support for new arc ut runner

* Comment unnecessary OMP_NUM_THREADS related settings for arc uts
---
 .github/workflows/llm_unit_tests.yml           | 18 +++++++++---------
 .../test/run-llm-inference-tests-gpu-434.sh    |  8 ++++----
 python/llm/test/run-llm-inference-tests-gpu.sh |  8 ++++----
 3 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/.github/workflows/llm_unit_tests.yml b/.github/workflows/llm_unit_tests.yml
index 01155781..17e8f1fc 100644
--- a/.github/workflows/llm_unit_tests.yml
+++ b/.github/workflows/llm_unit_tests.yml
@@ -216,10 +216,10 @@ jobs:
       matrix:
         pytorch-version: ['2.1', '2.0']
         python-version: ${{ fromJson(needs.setup-python-version.outputs.python-version) }}
-    runs-on: [self-hosted, llm, arc]
+    runs-on: [self-hosted, llm, arc-ut]
     env:
-      OMP_NUM_THREADS: 16
-      THREAD_NUM: 16
+      # OMP_NUM_THREADS: 16
+      # THREAD_NUM: 16
       ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
     steps:
       - name: Set environment variables
@@ -266,9 +266,9 @@
       run: |
         # Specific oneapi position on arc ut test machines
         if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
-          source /home/arda/intel/oneapi/setvars.sh
-        elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
           source /opt/intel/oneapi/setvars.sh
+        elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
+          source /home/arda/intel/oneapi/setvars.sh
         fi
         bash python/llm/test/run-llm-install-tests.sh
 
@@ -324,9 +324,9 @@
       run: |
         # Specific oneapi position on arc ut test machines
         if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
-          source /home/arda/intel/oneapi/setvars.sh
-        elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
           source /opt/intel/oneapi/setvars.sh
+        elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
+          source /home/arda/intel/oneapi/setvars.sh
         fi
         python -m pip install datasets librosa soundfile einops tiktoken transformers_stream_generator
         bash python/llm/test/run-llm-inference-tests-gpu.sh
@@ -341,8 +341,8 @@
         python -m pip install bitsandbytes scipy
         # Specific oneapi position on arc ut test machines
         if [[ '${{ matrix.pytorch-version }}' == '2.1' ]]; then
-          source /home/arda/intel/oneapi/setvars.sh
-        elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
           source /opt/intel/oneapi/setvars.sh
+        elif [[ '${{ matrix.pytorch-version }}' == '2.0' ]]; then
+          source /home/arda/intel/oneapi/setvars.sh
         fi
         bash python/llm/test/run-llm-example-tests-gpu.sh
diff --git a/python/llm/test/run-llm-inference-tests-gpu-434.sh b/python/llm/test/run-llm-inference-tests-gpu-434.sh
index ea5fe8b0..a88bb637 100644
--- a/python/llm/test/run-llm-inference-tests-gpu-434.sh
+++ b/python/llm/test/run-llm-inference-tests-gpu-434.sh
@@ -13,10 +13,10 @@ set -e
 echo "# Start testing inference"
 start=$(date "+%s")
 
-if [ -z "$THREAD_NUM" ]; then
-  THREAD_NUM=2
-fi
-export OMP_NUM_THREADS=$THREAD_NUM
+# if [ -z "$THREAD_NUM" ]; then
+# THREAD_NUM=2
+# fi
+# export OMP_NUM_THREADS=$THREAD_NUM
 export BIGDL_LLM_XMX_DISABLED=1
 pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_mlp.py -v -s -k "Mistral"
 unset BIGDL_LLM_XMX_DISABLED
diff --git a/python/llm/test/run-llm-inference-tests-gpu.sh b/python/llm/test/run-llm-inference-tests-gpu.sh
index fc4b6f90..351b6ec7 100644
--- a/python/llm/test/run-llm-inference-tests-gpu.sh
+++ b/python/llm/test/run-llm-inference-tests-gpu.sh
@@ -13,10 +13,10 @@ set -e
 echo "# Start testing inference"
 start=$(date "+%s")
 
-if [ -z "$THREAD_NUM" ]; then
-  THREAD_NUM=2
-fi
-export OMP_NUM_THREADS=$THREAD_NUM
+# if [ -z "$THREAD_NUM" ]; then
+# THREAD_NUM=2
+# fi
+# export OMP_NUM_THREADS=$THREAD_NUM
 pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api.py -v -s
 export BIGDL_LLM_XMX_DISABLED=1
 pytest ${LLM_INFERENCE_TEST_DIR}/test_transformers_api_final_logits.py -v -s
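
The version-conditional oneAPI sourcing above can also be exercised outside
the workflow when debugging a runner. Below is a minimal sketch, not part of
the patch: PYTORCH_VERSION is a hypothetical stand-in for the workflow's
${{ matrix.pytorch-version }}, and the two setvars.sh paths are the per-runner
install locations this patch assigns, which may differ on other machines.

    #!/bin/bash
    # Pick the oneAPI install matching the PyTorch version under test.
    # PYTORCH_VERSION stands in for ${{ matrix.pytorch-version }} (assumption:
    # only '2.1' and '2.0' are exercised, mirroring the workflow matrix).
    PYTORCH_VERSION="${PYTORCH_VERSION:-2.1}"

    if [[ "$PYTORCH_VERSION" == '2.1' ]]; then
        source /opt/intel/oneapi/setvars.sh
    elif [[ "$PYTORCH_VERSION" == '2.0' ]]; then
        source /home/arda/intel/oneapi/setvars.sh
    fi

    # setvars.sh exports ONEAPI_ROOT on success; print it as a sanity check
    # that the intended toolkit was picked up.
    echo "oneAPI sourced from: ${ONEAPI_ROOT:-<not set>}"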