[LLM] Add current Linux UT inference tests to nightly tests (#8578)

* Add current inference UTs to nightly tests

* Change test model from chatglm-6b to chatglm2-6b

* Add thread num env variable for nightly test

* Fix URLs

* Small fix
Yuwen Hu 2023-07-21 13:26:38 +08:00 committed by GitHub
parent feb3af0567
commit bbde423349
3 changed files with 93 additions and 11 deletions


@@ -31,6 +31,7 @@ jobs:
GPTNEOX_ORIGIN_PATH: ./llm/models/gptneox-7b-redpajama-bf16
BLOOM_ORIGIN_PATH: ./llm/models/bloomz-7b1
STARCODER_ORIGIN_PATH: ./llm/models/gpt_bigcode-santacoder
INT4_CKPT_DIR: ./llm/ggml-actions/nightly
LLAMA_INT4_CKPT_PATH: ./llm/ggml-actions/nightly/bigdl_llm_llama_q4_0.bin
GPTNEOX_INT4_CKPT_PATH: ./llm/ggml-actions/nightly/bigdl_llm_gptneox_q4_0.bin
@@ -73,3 +74,84 @@ jobs:
uses: ./.github/actions/llm/remove-llm-env
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
llm-nightly-inference-test-avx512:
runs-on: [ self-hosted, llm, AVX512, ubuntu-20.04-lts ]
needs: llm-cpp-build
strategy:
fail-fast: false
matrix:
python-version: ["3.9"]
env:
INT4_CKPT_DIR: ./llm/ggml-actions/stable
LLAMA_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin
GPTNEOX_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_redpajama_7b_q4_0.bin
BLOOM_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_bloom_7b_q4_0.bin
STARCODER_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_santacoder_1b_q4_0.bin
LLM_DIR: ./llm
ORIGINAL_CHATGLM2_6B_PATH: ./llm/chatglm2-6b/
THREAD_NUM: 24
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools==58.0.4
python -m pip install --upgrade wheel
- name: Download llm binary
uses: ./.github/actions/llm/download-llm-binary
- name: Install BigDL-LLM
uses: ./.github/actions/llm/setup-llm-env
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Download ckpt & original models
run: |
if [ ! -d $LLAMA_INT4_CKPT_PATH ]; then
echo "Directory $LLAMA_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${LLAMA_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $GPTNEOX_INT4_CKPT_PATH ]; then
echo "Directory $GPTNEOX_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${GPTNEOX_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $BLOOM_INT4_CKPT_PATH ]; then
echo "Directory $BLOOM_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${BLOOM_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $STARCODER_INT4_CKPT_PATH ]; then
echo "Directory $STARCODER_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${STARCODER_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $ORIGINAL_CHATGLM2_6B_PATH ]; then
echo "Directory $ORIGINAL_CHATGLM2_6B_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${ORIGINAL_CHATGLM2_6B_PATH:2} -P $LLM_DIR
fi
- name: Run LLM cli test
uses: ./.github/actions/llm/cli-test
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Run LLM inference test
uses: ./.github/actions/llm/inference-test
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Run LLM langchain test
uses: ./.github/actions/llm/langchain-test
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Clean up test environment
uses: ./.github/actions/llm/remove-llm-env
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
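
The download step above builds each FTP URL with Bash substring expansion: every checkpoint path is defined relative to the workspace with a leading "./", and ${VAR:2} drops those two characters before the path is appended to the FTP base URL. This is what the "Fix URLs" item in the commit message refers to; the earlier ${VAR:1} only removed the dot and left a double slash in the URL. A minimal sketch, using a placeholder value for LLM_FTP_URL (the real value comes from the CI environment):

#!/usr/bin/env bash
# Placeholder base URL; the workflow takes LLM_FTP_URL from the runner environment.
LLM_FTP_URL="ftp://example-host"
LLAMA_INT4_CKPT_PATH="./llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin"

echo "${LLAMA_INT4_CKPT_PATH:1}"               # /llm/ggml-actions/stable/... (old form, leaves "//" in the URL)
echo "${LLAMA_INT4_CKPT_PATH:2}"               # llm/ggml-actions/stable/...  (new form, clean relative path)
echo "$LLM_FTP_URL/${LLAMA_INT4_CKPT_PATH:2}"  # ftp://example-host/llm/ggml-actions/stable/bigdl_llm_llama_7b_q4_0.bin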


@@ -42,7 +42,7 @@ env:
STARCODER_INT4_CKPT_PATH: ./llm/ggml-actions/stable/bigdl_llm_santacoder_1b_q4_0.bin
LLM_DIR: ./llm
ORIGINAL_CHATGLM_6B_PATH: ./llm/chatglm-6b/
ORIGINAL_CHATGLM2_6B_PATH: ./llm/chatglm2-6b/
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
@@ -77,27 +77,27 @@ jobs:
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Download ckpt models
- name: Download ckpt & original models
run: |
if [ ! -d $LLAMA_INT4_CKPT_PATH ]; then
echo "Directory $LLAMA_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${LLAMA_INT4_CKPT_PATH:1} -P $INT4_CKPT_DIR
wget --no-verbose $LLM_FTP_URL/${LLAMA_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $GPTNEOX_INT4_CKPT_PATH ]; then
echo "Directory $GPTNEOX_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${GPTNEOX_INT4_CKPT_PATH:1} -P $INT4_CKPT_DIR
wget --no-verbose $LLM_FTP_URL/${GPTNEOX_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $BLOOM_INT4_CKPT_PATH ]; then
echo "Directory $BLOOM_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${BLOOM_INT4_CKPT_PATH:1} -P $INT4_CKPT_DIR
wget --no-verbose $LLM_FTP_URL/${BLOOM_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $STARCODER_INT4_CKPT_PATH ]; then
echo "Directory $STARCODER_INT4_CKPT_PATH not found. Downloading from FTP server..."
wget --no-verbose $LLM_FTP_URL/${STARCODER_INT4_CKPT_PATH:1} -P $INT4_CKPT_DIR
wget --no-verbose $LLM_FTP_URL/${STARCODER_INT4_CKPT_PATH:2} -P $INT4_CKPT_DIR
fi
if [ ! -d $ORIGINAL_CHATGLM_6B_PATH ]; then
echo "Directory $ORIGINAL_CHATGLM_6B_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/${ORIGINAL_CHATGLM_6B_PATH:1} -P $LLM_DIR
if [ ! -d $ORIGINAL_CHATGLM2_6B_PATH ]; then
echo "Directory $ORIGINAL_CHATGLM2_6B_PATH not found. Downloading from FTP server..."
wget -r -nH --no-verbose --cut-dirs=1 $LLM_FTP_URL/${ORIGINAL_CHATGLM2_6B_PATH:2} -P $LLM_DIR
fi
- name: Run LLM cli test
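
Unlike the single-file checkpoints, chatglm2-6b is a directory, so it is mirrored recursively. With -nH wget does not reproduce the host name locally, and --cut-dirs=1 strips the first remote path component (the leading "llm/"), so the files land under $LLM_DIR/chatglm2-6b/, which is exactly what ORIGINAL_CHATGLM2_6B_PATH points at. A sketch of that call with a placeholder host:

#!/usr/bin/env bash
# Placeholder host; the real LLM_FTP_URL is provided by the CI environment.
LLM_FTP_URL="ftp://example-host"
LLM_DIR="./llm"
ORIGINAL_CHATGLM2_6B_PATH="./llm/chatglm2-6b/"

# The remote URL resolves to ftp://example-host/llm/chatglm2-6b/.
# -nH drops the host directory and --cut-dirs=1 drops the leading "llm" component,
# so each file is saved as ./llm/chatglm2-6b/<file>.
wget -r -nH --no-verbose --cut-dirs=1 "$LLM_FTP_URL/${ORIGINAL_CHATGLM2_6B_PATH:2}" -P "$LLM_DIR"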


@@ -33,7 +33,7 @@ class TestTransformersAPI(unittest.TestCase):
self.n_threads = 2
def test_transformers_int4(self):
model_path = os.environ.get('ORIGINAL_CHATGLM_6B_PATH')
model_path = os.environ.get('ORIGINAL_CHATGLM2_6B_PATH')
model = AutoModel.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
input_str = "晚上睡不着应该怎么办"
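
A minimal sketch of running this unit test locally with the environment the nightly job sets up; the pytest selector and working directory are assumptions for illustration, not taken from the commit:

#!/usr/bin/env bash
# Assumed local invocation from the repository's LLM test directory.
export ORIGINAL_CHATGLM2_6B_PATH=./llm/chatglm2-6b/   # read by the test via os.environ.get(...)
export THREAD_NUM=24                                  # thread-count variable added for the nightly job
python -m pytest -k test_transformers_int4 -v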