diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml index f2cae092..9be781c6 100644 --- a/.github/workflows/llm_performance_tests.yml +++ b/.github/workflows/llm_performance_tests.yml @@ -118,6 +118,20 @@ jobs: sed -i 's/test1/test2/g' run.py python run.py + - name: Test on xpu(transformers==4.37.0) + shell: bash + run: | + source /opt/intel/oneapi/setvars.sh + export USE_XETLA=OFF + export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1 + # upgrade transformers for model Qwen/Qwen1.5-7B-Chat + python -m pip install transformers==4.37.0 + cp python/llm/test/benchmark/arc-perf-transformers-437.yaml python/llm/dev/benchmark/all-in-one/config.yaml + cd python/llm/dev/benchmark/all-in-one + # change csv name + sed -i 's/test2/test3/g' run.py + python run.py + - name: Concat csv and generate html shell: bash run: | diff --git a/python/llm/test/benchmark/arc-perf-transformers-437.yaml b/python/llm/test/benchmark/arc-perf-transformers-437.yaml new file mode 100644 index 00000000..68f1fa03 --- /dev/null +++ b/python/llm/test/benchmark/arc-perf-transformers-437.yaml @@ -0,0 +1,16 @@ +# For the models that require transformers 4.37.0 +repo_id: + - 'Qwen/Qwen1.5-7B-Chat' +local_model_hub: '/mnt/disk1/models' +warm_up: 1 +num_trials: 3 +num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) +batch_size: 1 # default to 1 +in_out_pairs: + - '32-32' + - '1024-128' + - '2048-256' +test_api: + - "transformer_int4_gpu" # on Intel GPU +cpu_embedding: False # whether put embedding to CPU (only available now for gpu win related test_api)