LLM: add benchmark script for deepspeed autotp on gpu (#10380)
parent f9c144dc4c
commit 5d996a5caf

4 changed files with 149 additions and 1 deletion
@@ -25,5 +25,6 @@ test_api:
   # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server
   # - "transformer_int4_gpu_win" # on Intel GPU for Windows
   # - "transformer_int4_loadlowbit_gpu_win" # on Intel GPU for Windows using the load_low_bit API. Please make sure you have used save.py to save the converted low-bit model
+  # - "deepspeed_optimize_model_gpu" # deepspeed autotp on Intel GPU
 cpu_embedding: False # whether to put embedding on CPU (only available now for gpu win related test_api)
 streaming: False # whether to output in a streaming way (only available now for gpu win related test_api)
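To actually run the new benchmark, the `deepspeed_optimize_model_gpu` entry above has to be enabled in the all-in-one config.yaml. A hypothetical one-liner for doing that, assuming the default config layout shown in this hunk (the other keys in config.yaml are left untouched):

cd python/llm/dev/benchmark/all-in-one
sed -i 's|^  # - "deepspeed_optimize_model_gpu"|  - "deepspeed_optimize_model_gpu"|' config.yaml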
							
								
								
									
python/llm/dev/benchmark/all-in-one/run-deepspeed-arc.sh (new file, 16 additions)

@@ -0,0 +1,16 @@
+export MASTER_ADDR=127.0.0.1
+export FI_PROVIDER=tcp
+export CCL_ATL_TRANSPORT=ofi
+export CCL_ZE_IPC_EXCHANGE=sockets
+
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
+basekit_root=/opt/intel/oneapi
+source $basekit_root/setvars.sh --force
+source $basekit_root/ccl/latest/env/vars.sh --force
+
+NUM_GPUS=2 # number of GPUs used
+export USE_XETLA=OFF
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
+export TORCH_LLM_ALLREDUCE=0 # Different from PVC
+
+mpirun -np $NUM_GPUS --prepend-rank python run.py
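run-deepspeed-arc.sh sources oneAPI and oneCCL, preloads tcmalloc from the active conda environment, and launches run.py once per GPU through mpirun. A sketch of the expected invocation, assuming oneAPI is installed at /opt/intel/oneapi and config.yaml has been set up as above:

cd python/llm/dev/benchmark/all-in-one
bash run-deepspeed-arc.sh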
							
								
								
									
python/llm/dev/benchmark/all-in-one/run-deepspeed-pvc.sh (new file, 16 additions)

@@ -0,0 +1,16 @@
+export ZE_AFFINITY_MASK="0,1" # specify the GPUs to use
+NUM_GPUS=2 # number of GPUs used
+export MASTER_ADDR=127.0.0.1
+export FI_PROVIDER=tcp
+export CCL_ATL_TRANSPORT=ofi
+export CCL_ZE_IPC_EXCHANGE=sockets
+
+export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
+basekit_root=/opt/intel/oneapi
+source $basekit_root/setvars.sh --force
+source $basekit_root/ccl/latest/env/vars.sh --force
+
+export OMP_NUM_THREADS=$((56/$NUM_GPUS))
+export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
+export TORCH_LLM_ALLREDUCE=1
+mpirun -np $NUM_GPUS --prepend-rank python run.py
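Compared with the Arc launcher, the PVC variant pins OMP threads per rank (splitting 56 cores across the ranks), keeps TORCH_LLM_ALLREDUCE enabled, and selects devices through ZE_AFFINITY_MASK. A hedged sketch of scaling it to four ranks; the four-tile mask and the 56-core figure are illustrative and depend on the actual node topology:

export ZE_AFFINITY_MASK="0,1,2,3"  # hypothetical: expose four tiles
NUM_GPUS=4
export OMP_NUM_THREADS=$((56/$NUM_GPUS))
mpirun -np $NUM_GPUS --prepend-rank python run.py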
@@ -37,7 +37,7 @@ from bigdl.llm.utils.common.log4Error import invalidInputError
 LLAMA_IDS = ['meta-llama/Llama-2-7b-chat-hf','meta-llama/Llama-2-13b-chat-hf',
              'meta-llama/Llama-2-70b-chat-hf','decapoda-research/llama-7b-hf',
              'decapoda-research/llama-65b-hf','lmsys/vicuna-7b-v1.5',
-             'lmsys/vicuna-13b-v1.3','project-baize/merged-baize-30b']
+             'lmsys/vicuna-13b-v1.3','lmsys/vicuna-33b-v1.3','project-baize/merged-baize-30b']
 
 CHATGLM_IDS = ['THUDM/chatglm-6b', 'THUDM/chatglm2-6b', 'THUDM/chatglm3-6b']
 
@@ -92,6 +92,8 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
         result = run_transformer_int4_loadlowbit_gpu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, cpu_embedding, batch_size, streaming)
     elif test_api == 'transformer_autocast_bf16':
         result = run_transformer_autocast_bf16(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, batch_size)
+    elif test_api == 'deepspeed_optimize_model_gpu':
+        result = run_deepspeed_optimize_model_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size)
 
     for in_out_pair in in_out_pairs:
         if result and result[in_out_pair]:
@@ -1077,6 +1079,119 @@ def run_transformer_autocast_bf16( repo_id,
                                           actual_in_len, actual_out_len, load_time])
     return result
 
+def run_deepspeed_optimize_model_gpu(repo_id,
+                                     local_model_hub,
+                                     in_out_pairs,
+                                     warm_up,
+                                     num_trials,
+                                     num_beams,
+                                     low_bit,
+                                     batch_size):
+    def get_int_from_env(env_keys, default):
+        for e in env_keys:
+            val = int(os.environ.get(e, -1))
+            if val >= 0:
+                return val
+        return int(default)
+    local_rank = get_int_from_env(["LOCAL_RANK","PMI_RANK"], "0")
+    world_size = get_int_from_env(["WORLD_SIZE","PMI_SIZE"], "1")
+    os.environ["RANK"] = str(local_rank)
+    os.environ["WORLD_SIZE"] = str(world_size)
+    os.environ["MASTER_PORT"] = os.environ.get("MASTER_PORT", "29500")
+
+    from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, GPTJForCausalLM, LlamaTokenizer
+    from bigdl.llm import optimize_model
+    import intel_extension_for_pytorch as ipex
+    import deepspeed
+    from deepspeed.accelerator.cpu_accelerator import CPU_Accelerator
+    from deepspeed.accelerator import set_accelerator, get_accelerator
+    from intel_extension_for_deepspeed import XPU_Accelerator
+
+    model_path = get_model_path(repo_id, local_model_hub)
+    print('model_path:', model_path)
+    # First use CPU as accelerator
+    # Convert to deepspeed model and apply bigdl-llm optimization on CPU to decrease GPU memory usage
+    current_accel = CPU_Accelerator()
+    set_accelerator(current_accel)
+    st = time.perf_counter()
+    if repo_id in CHATGLM_IDS:
+        model = AutoModel.from_pretrained(model_path, device_map={"": "cpu"}, low_cpu_mem_usage=True,
+                                          torch_dtype=torch.float16, trust_remote_code=True, use_cache=True).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    elif repo_id in LLAMA_IDS:
+        model = AutoModelForCausalLM.from_pretrained(model_path, device_map={"": "cpu"}, low_cpu_mem_usage=True,
+                                                     torch_dtype=torch.float16, trust_remote_code=True, use_cache=True).eval()
+        tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    else:
+        model = AutoModelForCausalLM.from_pretrained(model_path, device_map={"": "cpu"}, low_cpu_mem_usage=True,
+                                                     torch_dtype=torch.float16, trust_remote_code=True, use_cache=True).eval()
+        tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+    model = deepspeed.init_inference(model, mp_size=world_size,
+                                     dtype=torch.float16, replace_method="auto",)
+    end = time.perf_counter()
+    load_time = end - st
+    print(">> loading of model costs {}s".format(load_time))
+
+    # Use bigdl-llm `optimize_model` to convert the model into optimized low bit format
+    # Convert the rest of the model into float16 to reduce allreduce traffic
+    model = optimize_model(model.module.to('cpu'), low_bit=low_bit).to(torch.float16)
+    # Next, use XPU as accelerator to speed up inference
+    current_accel = XPU_Accelerator()
+    set_accelerator(current_accel)
+    # Move model back to xpu
+    model = model.to(f'xpu:{local_rank}')
+
+    # Modify backend-related settings
+    if world_size > 1:
+        get_accelerator().set_device(local_rank)
+    dist_backend = get_accelerator().communication_backend_name()
+    import deepspeed.comm.comm
+    deepspeed.comm.comm.cdb = None
+    from deepspeed.comm.comm import init_distributed
+    init_distributed()
+
+    model = BenchmarkWrapper(model)
+
+    result = {}
+    with torch.inference_mode():
+        for in_out in in_out_pairs:
+            in_out_len = in_out.split("-")
+            in_len = int(in_out_len[0])
+            out_len = int(in_out_len[1])
+            # As different tokenizers have different encodings,
+            # in_len.txt may be shorter than we need,
+            # use a much longer context to make sure the input length is sufficient
+            test_length = min(in_len*2, 8192)
+            while test_length not in [32, 256, 1024, 2048, 8192]:
+                test_length = test_length * 2
+            input_str = open(f"prompt/{test_length}.txt", 'r').read()
+            # As different tokenizers have different encodings,
+            # slice the input_ids to ensure the prompt length is the required length.
+            input_ids = tokenizer.encode(input_str, return_tensors="pt")
+            input_ids = input_ids[:, :in_len]
+            true_str = tokenizer.batch_decode(input_ids)[0]
+            input_list = [true_str] * batch_size
+            input_ids = tokenizer(input_list, return_tensors="pt").input_ids.to(f'xpu:{local_rank}')
+            actual_in_len = input_ids.shape[1]
+            result[in_out] = []
+            for i in range(num_trials + warm_up):
+                st = time.perf_counter()
+                output_ids = model.generate(input_ids, do_sample=False, max_new_tokens=out_len,
+                                            num_beams=num_beams)
+                torch.xpu.synchronize()
+                end = time.perf_counter()
+                output_ids = output_ids.cpu()
+                print("model generate cost: " + str(end - st))
+                output = tokenizer.batch_decode(output_ids)
+                actual_out_len = output_ids.shape[1] - actual_in_len
+                print(output[0])
+                if i >= warm_up:
+                    result[in_out].append([model.first_cost, model.rest_cost_mean, model.encoder_time,
+                                           actual_in_len, actual_out_len, load_time])
+    del model
+    torch.xpu.empty_cache()
+    return result
+
 if __name__ == '__main__':
     from omegaconf import OmegaConf
     conf = OmegaConf.load(f'{current_dir}/config.yaml')
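One note on how run_deepspeed_optimize_model_gpu ties back to the launcher scripts: it reads its rank and world size from LOCAL_RANK/PMI_RANK and WORLD_SIZE/PMI_SIZE and defaults MASTER_PORT to 29500, so it is designed to be started under mpirun as in the two shell scripts above. A minimal sketch of an equivalent manual two-rank launch (MASTER_ADDR mirrors the scripts; everything else comes from the MPI launcher or the function's own defaults):

export MASTER_ADDR=127.0.0.1
mpirun -np 2 --prepend-rank python run.py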