Fix pipeline parallel inference past_key_value error in Baichuan (#11318)

* fix past_key_value error
* add baichuan2 example
* fix style
* update doc
* add script link in doc
* fix import error
* update
parent 0af0102e61
commit be00380f1a

4 changed files with 71 additions and 1 deletion
In the example's README, the supported-model list gains the two Baichuan2 checkpoints:

```diff
@@ -11,6 +11,8 @@ To run this example with IPEX-LLM on Intel GPUs, we have some recommended requir
 - [meta-llama/Meta-Llama-3-8B-Instruct](./run_llama_arc_2_card.sh)
 - [Qwen/Qwen1.5-7B-Chat](./run_qwen1.5_arc_2_card.sh)
 - [Qwen/Qwen1.5-14B-Chat](./run_qwen1.5_arc_2_card.sh)
+- [baichuan-inc/Baichuan2-7B-Chat](./run_baichuan2_arc_2_card.sh)
+- [baichuan-inc/Baichuan2-13B-Chat](./run_baichuan2_arc_2_card.sh)
 
 ## Example: Run pipeline parallel inference on multiple GPUs
 
```
Further down, the README gains a collapsible Baichuan2 run example:

````diff
@@ -63,6 +65,22 @@ bash run_qwen1.5_arc_2_card.sh
 
 </details>
 
+</details>
+
+<details>
+  <summary> Show Baichuan2 example </summary>
+
+#### Run Baichuan2-7B-Chat / Baichuan2-13B-Chat on two Intel Arc A770
+
+You could specify `--repo-id-or-model-path` in the test script to be the huggingface repo id for Baichuan2 to be downloaded, or the path to the huggingface checkpoint folder. Besides, you could change `NUM_GPUS` to the number of GPUs you have on your machine.
+
+```bash
+pip install transformers==4.37.0
+bash run_baichuan2_arc_2_card.sh
+```
+
+</details>
+
 ### 3. Sample Output
 #### [meta-llama/Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)
 ```log
````
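For orientation, a minimal sketch of the driver side these flags presumably reach. The `from_pretrained` keyword arguments below, including `pipeline_parallel_stages`, are assumptions about the IPEX-LLM example pattern, not confirmed by this diff:

```python
# Sketch only (assumed IPEX-LLM example pattern, not part of this commit):
# a generate.py-style driver maps the script flags onto model loading.
from ipex_llm.transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "baichuan-inc/Baichuan2-7B-Chat",  # from --repo-id-or-model-path
    load_in_4bit=True,                 # assumed low-bit loading option
    trust_remote_code=True,            # Baichuan2 ships custom modeling code
    use_cache=True,
    pipeline_parallel_stages=2,        # from --gpu-num / NUM_GPUS (assumed kwarg)
)
```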
The new launcher script referenced above (`run_baichuan2_arc_2_card.sh`):

```diff
@@ -0,0 +1,36 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+source /opt/intel/oneapi/setvars.sh
+export MASTER_ADDR=127.0.0.1
+export MASTER_PORT=9090
+export FI_PROVIDER=tcp
+export USE_XETLA=OFF
+export OMP_NUM_THREADS=6
+if [[ $KERNEL_VERSION != *"6.5"* ]]; then
+    export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
+fi
+export TORCH_LLM_ALLREDUCE=0
+
+NUM_GPUS=2 # number of used GPU
+
+# To run Baichuan2-7B-Chat
+CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
+    generate.py --repo-id-or-model-path 'baichuan-inc/Baichuan2-7B-Chat' --gpu-num $NUM_GPUS
+
+# # To run Baichuan2-13B-Chat
+# CCL_ZE_IPC_EXCHANGE=sockets torchrun --standalone --nnodes=1 --nproc-per-node $NUM_GPUS \
+#     generate.py --repo-id-or-model-path 'baichuan-inc/Baichuan2-13B-Chat' --gpu-num $NUM_GPUS
```
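The exported `MASTER_ADDR`/`MASTER_PORT` feed torchrun's rendezvous. A minimal sketch of what each spawned process is assumed to do with them (the `ccl` backend registered by `oneccl_bindings_for_pytorch` is an assumption here, though `dist.get_rank()` does appear in the Python diff below):

```python
# Sketch: each of the NUM_GPUS processes spawned by torchrun becomes one
# pipeline stage. The 'ccl' backend (Intel oneCCL) is assumed; MASTER_ADDR
# and MASTER_PORT from the script configure the rendezvous.
import oneccl_bindings_for_pytorch  # noqa: F401  registers the 'ccl' backend
import torch.distributed as dist

dist.init_process_group(backend="ccl")
stage = dist.get_rank()  # 0..NUM_GPUS-1, one pipeline stage per process
```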
One of the existing launcher scripts additionally enables quantized KV cache:

```diff
@@ -20,6 +20,7 @@ export MASTER_PORT=9090
 export FI_PROVIDER=tcp
 export USE_XETLA=OFF
 export OMP_NUM_THREADS=6
+export IPEX_LLM_QUANTIZE_KV_CACHE=1
 if [[ $KERNEL_VERSION != *"6.5"* ]]; then
     export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
 fi
```
In the pipeline-parallel Python source, `pipeline_parallel()` now publishes the per-rank layer bounds as module-level globals:

```diff
@@ -79,6 +79,9 @@ def pipeline_parallel(model, pipeline_parallel_stages):
         pipeline_parallel_stages
 
     local_rank = dist.get_rank()
+
+    global layer_start
+    global layer_end
     layer_start = slice_size * local_rank
     layer_end = layer_start + min(slice_size, model.config.num_hidden_layers - layer_start)
 
```
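Since `layer_start` and `layer_end` now persist as globals, the slicing they hold is worth spelling out. A minimal sketch, assuming the truncated context line above is the usual ceil-division of `num_hidden_layers` by the stage count:

```python
# Sketch of the layer-slicing arithmetic from this hunk: each rank owns
# the half-open range [layer_start, layer_end) of decoder layers.
def slice_layers(num_hidden_layers: int, stages: int, local_rank: int):
    slice_size = (num_hidden_layers + stages - 1) // stages  # ceil-divide (assumed)
    layer_start = slice_size * local_rank
    layer_end = layer_start + min(slice_size, num_hidden_layers - layer_start)
    return layer_start, layer_end

# e.g. a 32-layer model (Baichuan2-7B) split over 2 GPUs:
assert slice_layers(32, 2, 0) == (0, 16)   # rank 0 runs layers 0..15
assert slice_layers(32, 2, 1) == (16, 32)  # rank 1 runs layers 16..31
```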
`pipeline_parallel_generate()` pulls the same globals into scope so the decode loop can consult them:

```diff
@@ -144,6 +147,9 @@ def pipeline_parallel_generate(self,
     pre_rank = (local_rank - 1) % self.pipeline_parallel_stages
     next_rank = (local_rank + 1) % self.pipeline_parallel_stages
 
+    global layer_start
+    global layer_end
+
     self.first_token_time = 0
     self.next_token_time = []
 
```
The core fix, in `pipeline_parallel_generate()`: instead of forwarding `outputs.past_key_values` as-is, ranks other than 0 pad the leading cache slots (layers they do not own) with placeholder tensors:

```diff
@@ -182,7 +188,16 @@ def pipeline_parallel_generate(self,
 
         _input_ids = next_ids
         output_ids = torch.cat([output_ids, next_ids], dim=-1)
-        _past_key_values = outputs.past_key_values
+
+        if isinstance(outputs.past_key_values, tuple) and local_rank != 0:
+            value_placeholder = torch.empty_like((outputs.past_key_values)[-1][0])
+            past_key_values_placeholder = tuple(
+                (value_placeholder, value_placeholder) for _ in range(layer_start)
+            ) + (outputs.past_key_values)[layer_start:]
+            _past_key_values = past_key_values_placeholder
+        else:
+            _past_key_values = outputs.past_key_values
+
         toc = time.time()
         if step == 0:
             self.first_token_time = toc - tic
```
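To see why this works in isolation: on a non-first rank, only the slots from `layer_start` onward hold that stage's real KV cache, so the leading slots are filled with dummies to keep `past_key_values` a uniform tuple of `(key, value)` pairs. A minimal, self-contained sketch of the same construction (shapes are synthetic):

```python
import torch

# Toy stand-in for outputs.past_key_values on rank 1 of a 2-stage pipeline:
# 4 decoder layers total, this stage owns layers [2, 4).
num_layers, layer_start = 4, 2
kv = (1, 8, 16, 64)  # (batch, heads, seq_len, head_dim), illustrative shape
past_key_values = tuple((torch.randn(kv), torch.randn(kv)) for _ in range(num_layers))

# Same construction as the hunk: dummy (key, value) pairs fill the slots of
# layers this stage never ran, keeping the tuple uniform for the next step.
value_placeholder = torch.empty_like(past_key_values[-1][0])
_past_key_values = tuple(
    (value_placeholder, value_placeholder) for _ in range(layer_start)
) + past_key_values[layer_start:]

assert len(_past_key_values) == num_layers
assert _past_key_values[0][0] is value_placeholder          # leading slots are dummies
assert _past_key_values[layer_start] is past_key_values[layer_start]  # real KV kept
```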