From 4faf5af8f14201a1858c4dad25cf6e4ab69916c3 Mon Sep 17 00:00:00 2001 From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com> Date: Mon, 13 Nov 2023 13:58:40 +0800 Subject: [PATCH] [LLM] Add perf test for core on Windows (#9397) * temporary stop other perf test * Add framework for core performance test with one test model * Small fix and add platform control * Comment out lp for now * Add missing ymal file * Small fix * Fix sed contents * Small fix * Small path fixes * Small fix * Add update to ftp * Small upload fix * add chatglm3-6b * LLM: add model names * Keep repo id same as ftp and temporary make baichuan2 first priority * change order * Remove temp if false and separate pr and nightly results * Small fix --------- Co-authored-by: jinbridge <2635480475@qq.com> --- .github/workflows/llm_performance_tests.yml | 56 +++++++++++++++++++ python/llm/test/benchmark/core-perf-test.yaml | 28 ++++++++++ 2 files changed, 84 insertions(+) create mode 100644 python/llm/test/benchmark/core-perf-test.yaml diff --git a/.github/workflows/llm_performance_tests.yml b/.github/workflows/llm_performance_tests.yml index 00312b71..ced9d75d 100644 --- a/.github/workflows/llm_performance_tests.yml +++ b/.github/workflows/llm_performance_tests.yml @@ -188,3 +188,59 @@ jobs: cp ./*.csv /mnt/disk1/nightly_perf_cpu/ cd ../../../test/benchmark python csv_to_html.py -f /mnt/disk1/nightly_perf_cpu/ + + llm-performance-test-on-core: + needs: llm-cpp-build + strategy: + fail-fast: false + matrix: + include: + - os: windows + platform: dp + python-version: "3.9" + # - os: windows + # platform: lp + # python-version: "3.9" + runs-on: [self-hosted, "${{ matrix.os }}", llm, perf-core, "${{ matrix.platform }}"] + env: + ANALYTICS_ZOO_ROOT: ${{ github.workspace }} + CSV_SAVE_PATH: ${{ github.event.schedule && 'D:/action-runners/nightly_perf_core_' || 'D:/action-runners/pr_perf_core_' }}${{ matrix.platform }}/ + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ 
matrix.python-version }}
+        uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        shell: bash
+        run: |
+          python -m pip install --upgrade pip
+          python -m pip install --upgrade wheel
+          python -m pip install --upgrade omegaconf pandas
+          python -m pip install --upgrade tiktoken einops transformers_stream_generator
+
+      - name: Download llm binary
+        uses: ./.github/actions/llm/download-llm-binary
+
+      - name: Run LLM install (all) test
+        uses: ./.github/actions/llm/setup-llm-env
+
+      - name: Test on core ${{ matrix.platform }}
+        shell: bash
+        run: |
+          mv python/llm/test/benchmark/core-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
+          cd python/llm/dev/benchmark/all-in-one
+          export http_proxy=${HTTP_PROXY}
+          export https_proxy=${HTTPS_PROXY}
+          # hide time info
+          sed -i 's/str(end - st)/"xxxxxx"/g' run.py
+          python run.py
+          cp ./*.csv $CSV_SAVE_PATH
+          cd ../../../test/benchmark
+          python csv_to_html.py -f $CSV_SAVE_PATH
+          cd ../../dev/benchmark/all-in-one/
+          # Upload to FTP only on scheduled (nightly) runs. github.event.schedule
+          # is a cron string with spaces/globs, so it must be quoted and tested
+          # with -n; the previous bare `[ ${{ github.event.schedule}} ]` would
+          # word-split and glob-expand, breaking the test on nightly runs.
+          if [ -n "${{ github.event.schedule }}" ]; then
+            curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/core_${{ matrix.platform }}/
+          fi
diff --git a/python/llm/test/benchmark/core-perf-test.yaml b/python/llm/test/benchmark/core-perf-test.yaml
new file mode 100644
index 00000000..deb2ca53
--- /dev/null
+++ b/python/llm/test/benchmark/core-perf-test.yaml
@@ -0,0 +1,28 @@
+repo_id:
+  - 'THUDM/chatglm2-6b'
+  - 'THUDM/chatglm3-6b'
+  - 'baichuan-inc/Baichuan2-7B-Chat'
+  - 'internlm/internlm-chat-7b-8k'
+  - 'Qwen/Qwen-7B-Chat-10-12'
+  - 'BAAI/AquilaChat2-7B'
+  - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'WisdomShell/CodeShell-7B'
+  - 'tiiuae/falcon-7b-instruct-with-patch'
+local_model_hub: 'D:\llm-models'
+warm_up: 1
+num_trials: 3
+num_beams: 1 # default to greedy search
+low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. 
symmetric int4) +in_out_pairs: + - '32-32' + - '1024-128' +test_api: + - "transformer_int4" + # - "native_int4" + # - "optimize_model" + # - "pytorch_autocast_bf16" + # - "ipex_fp16_gpu" # on Intel GPU + # - "transformer_int4_gpu" # on Intel GPU + # - "optimize_model_gpu" # on Intel GPU + # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server +