Add Deepspeed Example of FLEX Mistral (#10640)
This commit is contained in:
parent d18dbfb097
commit 6000241b10
2 changed files with 36 additions and 0 deletions
@@ -0,0 +1,33 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
|
export MASTER_ADDR=127.0.0.1
export FI_PROVIDER=tcp
export CCL_ATL_TRANSPORT=ofi
export CCL_ZE_IPC_EXCHANGE=sockets
|
export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
basekit_root=/opt/intel/oneapi
source $basekit_root/setvars.sh --force
source $basekit_root/ccl/latest/env/vars.sh --force
|
NUM_GPUS=2 # number of GPUs to use
export USE_XETLA=OFF
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=2
export TORCH_LLM_ALLREDUCE=0 # Different from PVC
|
mpirun -np $NUM_GPUS --prepend-rank \
  python deepspeed_autotp.py --repo-id-or-model-path 'mistralai/Mistral-7B-Instruct-v0.1' --low-bit 'sym_int4'
|
@@ -132,3 +132,6 @@ Inference time: xxxx s
[INST] Artificial Intelligence (AI) is a branch of computer science that deals with the simulation of intelligent behavior in computers. It is a broad
```
|
#### 2.4 Deepspeed Tensor Parallel Inference
For platforms without sufficient GPU memory to serve Mistral on a single GPU card, **Deepspeed Tensor Parallel** can scale the inference across multiple cards, which enables serving larger models and increases resource utilization. Please refer to the example [here](../../../Deepspeed-AutoTP).
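
For context, the sketch below outlines the per-rank flow such an AutoTP script typically follows: load the model on CPU, let DeepSpeed shard it across ranks, quantize the local shard, and move it to the rank's GPU. It is a minimal illustration under stated assumptions, not the actual `deepspeed_autotp.py` from the linked example; the `optimize_model` call, the environment-variable handling, and the generation step are simplified.

```python
import os

import torch
import deepspeed
import intel_extension_for_pytorch as ipex  # registers the 'xpu' device
from transformers import AutoModelForCausalLM, AutoTokenizer
from bigdl.llm import optimize_model  # BigDL-LLM low-bit optimization API

model_path = 'mistralai/Mistral-7B-Instruct-v0.1'
# Assumption: with mpirun, the real script derives these from the MPI
# environment; plain WORLD_SIZE/LOCAL_RANK keep the sketch short.
world_size = int(os.environ.get('WORLD_SIZE', '1'))
local_rank = int(os.environ.get('LOCAL_RANK', '0'))

# Load the full model on CPU, then let DeepSpeed AutoTP shard its
# attention and MLP weights across the participating ranks.
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
model = deepspeed.init_inference(
    model, mp_size=world_size, dtype=torch.float16,
    replace_with_kernel_inject=False)

# Quantize this rank's shard (e.g. to sym_int4) and move it to its GPU.
model = optimize_model(model.module.to('cpu'), low_bit='sym_int4')
model = model.to(f'xpu:{local_rank}')

tokenizer = AutoTokenizer.from_pretrained(model_path)
inputs = tokenizer('[INST] What is AI? [/INST]',
                   return_tensors='pt').to(f'xpu:{local_rank}')
with torch.inference_mode():
    output = model.generate(inputs.input_ids, max_new_tokens=32)
if local_rank == 0:
    print(tokenizer.decode(output[0], skip_special_tokens=True))
```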