[LLM] Performance test (#8796)
parent 9d0f6a8cce
commit b8b1b6888b

5 changed files with 191 additions and 4 deletions

.github/workflows/llm_performance_tests.yml (new file, 66 additions)
@@ -0,0 +1,66 @@
name: LLM Performance Test

# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-performance-tests-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true

# Controls when the action will run.
on:
  schedule:
    - cron: '00 13 * * *' # GMT time, 13:00 GMT == 21:00 China
  pull_request:
    branches: [ main ]
    paths:
      - '.github/workflows/llm_performance_tests.yml'
      - '.github/workflows/llm-binary-build.yml'
      - '.github/actions/llm/setup-llm-env/action.yml'
      - '.github/actions/llm/remove-llm-env/action.yml'
      - '.github/actions/llm/download-llm-binary/action.yml'
  workflow_dispatch:
  workflow_call:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml
  llm-performance-test:
    needs: llm-cpp-build
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9"]
        instruction: ["AVX512"]
    runs-on: [ self-hosted, llm, perf ]
    env:
      THREAD_NUM: 24
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v2
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade setuptools==58.0.4
          python -m pip install --upgrade wheel

      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env
        env:
          ANALYTICS_ZOO_ROOT: ${{ github.workspace }}

      - name: Run LLM Performance test
        env:
          ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
        run:
          bash python/llm/dev/benchmark/run-benchmark-tests.sh

      # - name: Clean up test environment
      #   uses: ./.github/actions/llm/remove-llm-env
      #   env:
      #     ANALYTICS_ZOO_ROOT: ${{ github.workspace }}

@@ -510,8 +510,9 @@ class BenchmarkWrapper:
     learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
     """
 
-    def __init__(self, model):
+    def __init__(self, model, do_print=True):
         self.model = model
+        self.do_print = do_print
         print(self.model.__class__)
 
     def __getattr__(self, attr):

@@ -2445,9 +2446,13 @@ class BenchmarkWrapper:
             if this_peer_finished and not synced_gpus:
                 break
 
-        print(f"=========First token cost {first_token_time:.4f}s=========")
+        if self.do_print:
+            print(f"=========First token cost {first_token_time:.4f}s=========")
         if len(last_token_time) > 1:
-            print(f"=========Rest tokens cost average {np.mean(last_token_time):.4f}s ({len(last_token_time)} tokens in all)=========")
+            self.first_cost = first_token_time
+            self.rest_cost_mean = np.mean(last_token_time)
+            if self.do_print:
+                print(f"=========Rest tokens cost average {self.rest_cost_mean:.4f}s ({len(last_token_time)} tokens in all)=========")
 
         if streamer is not None:
             streamer.end()
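
The hunks above only show fragments of BenchmarkWrapper. For orientation, here is a minimal sketch of the pattern (hypothetical class name and simplified timing, not the actual implementation): the wrapper delegates unknown attributes to the wrapped model via __getattr__, records first-token and rest-token latencies, and prints only when do_print is true, so a test can read first_cost and rest_cost_mean silently.

import time

class TimingWrapperSketch:
    def __init__(self, model, do_print=True):
        self.model = model
        self.do_print = do_print
        self.first_cost = None        # seconds for the first generated token
        self.rest_cost_mean = None    # mean seconds per remaining token

    def __getattr__(self, attr):
        # anything not defined on the wrapper falls through to the wrapped model
        return getattr(self.model, attr)

    def _record(self, token_times):
        # the real wrapper collects token_times inside the decoding loop of generate()
        self.first_cost = token_times[0]
        if len(token_times) > 1:
            rest = token_times[1:]
            self.rest_cost_mean = sum(rest) / len(rest)
        if self.do_print:
            print(f"=========First token cost {self.first_cost:.4f}s=========")

    def generate(self, *args, **kwargs):
        start = time.perf_counter()
        output = self.model.generate(*args, **kwargs)
        # placeholder: treat the whole call as a single "token" timing
        self._record([time.perf_counter() - start])
        return output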
							
								
								
									

python/llm/dev/benchmark/pipelines/llama2_test.py (new file, 94 additions)
@@ -0,0 +1,94 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


# this code is copied from llama2 example test, and added performance test
import torch
import time
import argparse

from bigdl.llm.transformers import AutoModelForCausalLM
from transformers import LlamaTokenizer


import os
benchmark_util_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
import sys
sys.path.append(benchmark_util_path)
from benchmark_util import BenchmarkWrapper

# you could tune the prompt based on your own model,
# here the prompt tuning refers to https://huggingface.co/georgesung/llama2_7b_chat_uncensored#prompt-style
LLAMA2_PROMPT_FORMAT = """### HUMAN:
{prompt}

### RESPONSE:
"""

if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama2 model')
    parser.add_argument('--repo-id-or-model-path', type=str, default="meta-llama/Llama-2-7b-chat-hf",
                        help='The huggingface repo id for the Llama2 (e.g. `meta-llama/Llama-2-7b-chat-hf` and `meta-llama/Llama-2-13b-chat-hf`) to be downloaded'
                                ', or the path to the huggingface checkpoint folder')
    parser.add_argument('--prompt', type=str, default="What is AI?",
                        help='Prompt to infer')
    parser.add_argument('--n-predict', type=int, default=32,
                        help='Max tokens to predict')

    args = parser.parse_args()
    model_path = args.repo_id_or_model_path

    # Load model in 4 bit,
    # which convert the relevant layers in the model into INT4 format
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                    load_in_4bit=True,
                                                    trust_remote_code=True)


    model = BenchmarkWrapper(model, do_print=False)

    # Load tokenizer
    tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)

    # Generate predicted tokens
    with torch.inference_mode():
        prompt = LLAMA2_PROMPT_FORMAT.format(prompt=args.prompt)
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        st = time.time()
        # if your selected model is capable of utilizing previous key/value attentions
        # to enhance decoding speed, but has `"use_cache": false` in its model config,
        # it is important to set `use_cache=True` explicitly in the `generate` function
        # to obtain optimal performance with BigDL-LLM INT4 optimizations
        output = model.generate(input_ids,
                                max_new_tokens=args.n_predict)
        end = time.time()
        output_str = tokenizer.decode(output[0], skip_special_tokens=True)
        print(f'Inference time: {end-st} s')
        print('-'*20, 'Prompt', '-'*20)
        print(prompt)
        print('-'*20, 'Output', '-'*20)
        print(output_str)

        assert "AI is a term" in output_str, "output is not as expected, the correctness may be wrong."
        llama2_baseline = os.getenv('LLAMA2_BASELINE')
        if llama2_baseline is None:
            print('baseline is not set, skipping baseline validation')
        else:
            llama2_baseline = float(llama2_baseline)
            ratio = model.rest_cost_mean / llama2_baseline
            assert ratio < 1.1, f"performance did not meet baseline, the cost is {(ratio - 1) * 100}% higher than the baseline"
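
The baseline gate at the end of llama2_test.py is a plain ratio test against the LLAMA2_BASELINE environment variable. A small worked example with made-up numbers (the real baseline is configured on the perf machine, so these values are illustrative only):

llama2_baseline = 0.20                # assumed baseline: 0.20 s per rest token
for rest_cost_mean in (0.21, 0.23):   # two made-up measurements
    ratio = rest_cost_mean / llama2_baseline
    if ratio < 1.1:
        print(f"rest_cost_mean={rest_cost_mean}: pass (ratio {ratio:.2f})")
    else:
        print(f"rest_cost_mean={rest_cost_mean}: fail, cost is {(ratio - 1) * 100:.1f}% higher than the baseline")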
							
								
								
									
										22
									
								
								python/llm/dev/benchmark/run-benchmark-tests.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								python/llm/dev/benchmark/run-benchmark-tests.sh
									
									
									
									
									
										Normal file
									
								
							| 
						 | 
				
			
			@ -0,0 +1,22 @@
 | 
			
		|||
# Performance tests usually use dedicated machines, see below to set env vars, e.g. model paths
# The following environment variables should be ready
# ORIGINAL_LLAMA2_PATH
# LLAMA2_BASELINE
# LLM_DIR

if [ -z "$THREAD_NUM" ]; then
  THREAD_NUM=2
fi
export OMP_NUM_THREADS=$THREAD_NUM

######## LLAMA2
# transformers

if [ ! -d $ORIGINAL_LLAMA2_PATH ]; then
    echo "Directory $ORIGINAL_LLAMA2_PATH not found. Downloading from FTP server..."
    wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${ORIGINAL_LLAMA2_PATH:2} -P $LLM_DIR
fi

echo ">>> Testing LLAMA2 transformers API"
taskset -c 0-$((THREAD_NUM - 1)) python python/llm/dev/benchmark/pipelines/llama2_test.py --repo-id-or-model-path $ORIGINAL_LLAMA2_PATH

@@ -50,7 +50,7 @@ llm_home = os.path.join(os.path.dirname(os.path.abspath(__file__)), "src")
 github_artifact_dir = os.path.join(llm_home, '../llm-binary')
 libs_dir = os.path.join(llm_home, "bigdl", "llm", "libs")
 CONVERT_DEP = ['numpy >= 1.22', 'torch',
-               'transformers >= 4.31.0', 'sentencepiece',
+               'transformers == 4.31.0', 'sentencepiece',
                'accelerate', 'tabulate']
 windows_binarys = [
     "llama.dll",