From be00380f1a92eefed68b8fe523e99d8d9a612b67 Mon Sep 17 00:00:00 2001
From: SONG Ge <38711238+sgwhat@users.noreply.github.com>
Date: Mon, 17 Jun 2024 09:29:32 +0800
Subject: [PATCH] Fix pipeline parallel inference past_key_value error in
 Baichuan (#11318)

* fix past_key_value error

* add baichuan2 example

* fix style

* update doc

* add script link in doc

* fix import error

* update
---
 .../GPU/Pipeline-Parallel-Inference/README.md | 18 ++++++++++
 .../run_baichuan2_arc_2_card.sh               | 36 +++++++++++++++++++
 .../run_llama_arc_2_card.sh                   |  1 +
 .../transformers/pipeline_parallel.py         | 17 ++++++++-
 4 files changed, 71 insertions(+), 1 deletion(-)
 create mode 100644 python/llm/example/GPU/Pipeline-Parallel-Inference/run_baichuan2_arc_2_card.sh

diff --git a/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md b/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md
index 2005fe0f..52919ee7 100644
--- a/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md
+++ b/python/llm/example/GPU/Pipeline-Parallel-Inference/README.md
@@ -11,6 +11,8 @@ To run this example with IPEX-LLM on Intel GPUs, we have some recommended requir
 - [meta-llama/Meta-Llama-3-8B-Instruct](./run_llama_arc_2_card.sh)
 - [Qwen/Qwen1.5-7B-Chat](./run_qwen1.5_arc_2_card.sh)
 - [Qwen/Qwen1.5-14B-Chat](./run_qwen1.5_arc_2_card.sh)
+- [baichuan-inc/Baichuan2-7B-Chat](./run_baichuan2_arc_2_card.sh)
+- [baichuan-inc/Baichuan2-13B-Chat](./run_baichuan2_arc_2_card.sh)
 
 ## Example: Run pipeline parallel inference on multiple GPUs
 
@@ -63,6 +65,22 @@ bash run_qwen1.5_arc_2_card.sh
 ```
 
 </details>
+
+
+<details>
+  <summary> Show Baichuan2 example </summary>
+
+#### Run Baichuan2-7B-Chat / Baichuan2-13B-Chat on two Intel Arc A770
+
+You could specify `--repo-id-or-model-path` in the test script to be the huggingface repo id for Baichuan2 to be downloaded, or the path to the huggingface checkpoint folder. You could also change `NUM_GPUS` to the number of GPUs you have on your machine.
+
+```bash
+pip install transformers==4.37.0
+bash run_baichuan2_arc_2_card.sh
+```
+
+</details>
+
 ### 3. Sample Output
 #### [meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)
 ```log
diff --git a/python/llm/example/GPU/Pipeline-Parallel-Inference/run_baichuan2_arc_2_card.sh b/python/llm/example/GPU/Pipeline-Parallel-Inference/run_baichuan2_arc_2_card.sh
new file mode 100644
index 00000000..10eb12ea
--- /dev/null
+++ b/python/llm/example/GPU/Pipeline-Parallel-Inference/run_baichuan2_arc_2_card.sh
@@ -0,0 +1,36 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source /opt/intel/oneapi/setvars.sh
+export MASTER_ADDR=127.0.0.1
+export MASTER_PORT=9090
+export FI_PROVIDER=tcp
+export USE_XETLA=OFF
+export OMP_NUM_THREADS=6
+if [[ $KERNEL_VERSION != *"6.5"* ]]; then
+  export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+fi
+export TORCH_LLM_ALLREDUCE=0
+
+NUM_GPUS=2 # number of GPUs to use
+
+# To run Baichuan2-7B-Chat
+CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
+  generate.py --repo-id-or-model-path 'baichuan-inc/Baichuan2-7B-Chat' --gpu-num $NUM_GPUS
+
+# # To run Baichuan2-13B-Chat
+# CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
+#   generate.py --repo-id-or-model-path 'baichuan-inc/Baichuan2-13B-Chat' --gpu-num $NUM_GPUS
diff --git a/python/llm/example/GPU/Pipeline-Parallel-Inference/run_llama_arc_2_card.sh b/python/llm/example/GPU/Pipeline-Parallel-Inference/run_llama_arc_2_card.sh
index 7f7a467c..c6002e67 100644
--- a/python/llm/example/GPU/Pipeline-Parallel-Inference/run_llama_arc_2_card.sh
+++ b/python/llm/example/GPU/Pipeline-Parallel-Inference/run_llama_arc_2_card.sh
@@ -20,6 +20,7 @@ export MASTER_PORT=9090
 export FI_PROVIDER=tcp
 export USE_XETLA=OFF
 export OMP_NUM_THREADS=6
+export IPEX_LLM_QUANTIZE_KV_CACHE=1
 if [[ $KERNEL_VERSION != *"6.5"* ]]; then
   export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
 fi
diff --git a/python/llm/src/ipex_llm/transformers/pipeline_parallel.py b/python/llm/src/ipex_llm/transformers/pipeline_parallel.py
index d750cc1b..9b7d0fa3 100644
--- a/python/llm/src/ipex_llm/transformers/pipeline_parallel.py
+++ b/python/llm/src/ipex_llm/transformers/pipeline_parallel.py
@@ -79,6 +79,9 @@ def pipeline_parallel(model, pipeline_parallel_stages):
         pipeline_parallel_stages
 
     local_rank = dist.get_rank()
+
+    global layer_start
+    global layer_end
     layer_start = slice_size * local_rank
     layer_end = layer_start + min(slice_size,
                                   model.config.num_hidden_layers - layer_start)
@@ -144,6 +147,9 @@ def pipeline_parallel_generate(self,
         pre_rank = (local_rank - 1) % self.pipeline_parallel_stages
         next_rank = (local_rank + 1) % self.pipeline_parallel_stages
 
+        global layer_start
+        global layer_end
+
         self.first_token_time = 0
         self.next_token_time = []
 
@@ -182,7 +188,16 @@ def pipeline_parallel_generate(self,
             _input_ids = next_ids
             output_ids = torch.cat([output_ids, next_ids], dim=-1)
-            _past_key_values = outputs.past_key_values
+
+            if isinstance(outputs.past_key_values, tuple) and local_rank != 0:
+                value_placeholder = torch.empty_like((outputs.past_key_values)[-1][0])
+                past_key_values_placeholder = tuple(
+                    (value_placeholder, value_placeholder) for _ in range(layer_start)
+                ) + (outputs.past_key_values)[layer_start:]
+                _past_key_values = past_key_values_placeholder
+            else:
+                _past_key_values = outputs.past_key_values
+
             toc = time.time()
             if step == 0:
                 self.first_token_time = toc - tic
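
For readers of the last hunk, here is a minimal, self-contained sketch of the placeholder strategy the new branch applies on non-first pipeline stages. It is not part of the patch itself: the helper name `pad_past_key_values` and the toy tensor shapes are illustrative assumptions. The idea is that cache slots for layers owned by earlier stages are filled with uninitialized tensors of matching shape, so indexing by absolute layer id (from `layer_start` onwards) keeps working while only the local layers carry real key/value data.

```python
import torch


def pad_past_key_values(past_key_values, layer_start):
    # Shape/dtype template taken from the last layer's key tensor;
    # torch.empty_like allocates memory without initializing the values.
    value_placeholder = torch.empty_like(past_key_values[-1][0])
    # Slots for layers handled by earlier pipeline stages become placeholders,
    # while this stage's real (key, value) pairs are kept unchanged.
    return tuple(
        (value_placeholder, value_placeholder) for _ in range(layer_start)
    ) + past_key_values[layer_start:]


# Toy usage with made-up dimensions: 4 layers in total, this stage owns layers 2-3.
num_layers, batch, heads, seq, head_dim = 4, 1, 2, 5, 8
kv = tuple(
    (torch.randn(batch, heads, seq, head_dim),
     torch.randn(batch, heads, seq, head_dim))
    for _ in range(num_layers)
)
padded = pad_past_key_values(kv, layer_start=2)
print(len(padded))  # 4 -- per-layer indexing stays aligned across stages
```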