Used to test llm-performance on spr-perf (#9316)

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update action.yml

* Create cpu-perf-test.yaml

* Update action.yml

* Update action.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml

* Update llm_performance_tests.yml
This commit is contained in:
ZehuaCao 2023-11-03 11:17:16 +08:00 committed by GitHub
parent a0150bb205
commit ef83c3302e
2 changed files with 66 additions and 1 deletions

View file

@@ -137,4 +137,51 @@ jobs:
curl -T ./*.csv ${LLM_FTP_URL}/llm/ggml-actions/perf/
cp ./*.csv /mnt/disk1/nightly_perf/
cd ../../../test/benchmark
python csv_to_html.py -f /mnt/disk1/nightly_perf/
python csv_to_html.py -f ../../dev/benchmark/all-in-one
cp ./*.html /mnt/disk1/nightly_perf/
# Nightly LLM CPU performance job on the SPR (Sapphire Rapids) self-hosted runner.
# Depends on llm-cpp-build so the prebuilt llm binary artifact is available to download.
llm-performance-test-on-spr:
  needs: llm-cpp-build
  strategy:
    fail-fast: false
    matrix:
      # Quoted to avoid YAML float coercion (e.g. 3.10 -> 3.1).
      python-version: ["3.9"]
  runs-on: [self-hosted, llm, spr-perf]
  env:
    # Pin CPU threading for reproducible benchmark numbers.
    OMP_NUM_THREADS: 16
    THREAD_NUM: 16
    ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
  steps:
    - uses: actions/checkout@v3
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      shell: bash
      run: |
        python -m pip install --upgrade pip
        python -m pip install --upgrade wheel
        python -m pip install --upgrade omegaconf
        python -m pip install --upgrade pandas
        python -m pip install --upgrade einops
    # Fetch the llm binary built by the llm-cpp-build job.
    - name: Download llm binary
      uses: ./.github/actions/llm/download-llm-binary
    - name: Run LLM install (all) test
      uses: ./.github/actions/llm/setup-llm-env
    - name: Test on cpu
      shell: bash
      run: |
        # Use the CPU perf config as the all-in-one benchmark config, then
        # run the benchmark and publish CSV/HTML results to the nightly share.
        mv python/llm/test/benchmark/cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
        cd python/llm/dev/benchmark/all-in-one
        export http_proxy=${HTTP_PROXY}
        export https_proxy=${HTTPS_PROXY}
        python run.py
        cp ./*.csv /mnt/disk1/nightly_perf_cpu/
        cd ../../../test/benchmark
        python csv_to_html.py -f /mnt/disk1/nightly_perf_cpu/

View file

@@ -0,0 +1,18 @@
# Benchmark config for the nightly CPU (SPR) performance test.
# Consumed by python/llm/dev/benchmark/all-in-one/run.py as config.yaml.
# Models to benchmark, resolved under local_model_hub.
repo_id:
- 'meta-llama/Llama-2-7b-chat-hf'
# Directory on the runner holding pre-downloaded model weights.
local_model_hub: '/mnt/disk1/models'
# Warm-up iterations excluded from timing, then timed trials.
warm_up: 1
num_trials: 3
num_beams: 1 # default to greedy search
low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4)
# Each entry is 'input_tokens-output_tokens' for a benchmark run.
in_out_pairs:
- '32-32'
- '1024-128'
# Backend APIs to benchmark; only the CPU transformers int4 path is enabled here.
test_api:
- "transformer_int4"
# - "native_int4"
# - "optimize_model"
# - "pytorch_autocast_bf16"
# - "ipex_fp16_gpu" # on Intel GPU
# - "transformer_int4_gpu" # on Intel GPU
# - "optimize_model_gpu" # on Intel GPU