[LLM] Windows unittest (#8356)

* win-unittest

* update

* update

* try llama 7b

* delete llama

* update

* add red-3b

* only test red-3b

* revert

* add langchain

* add dependency

* delete langchain
This commit is contained in:
Yina Chen 2023-06-29 14:03:12 +08:00 committed by GitHub
parent 783aea3309
commit 6251ad8934
3 changed files with 95 additions and 0 deletions

View file

@@ -60,3 +60,15 @@ jobs:
bash python/llm/test/run-llm-install-tests.sh
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Run LLM unittests
shell: bash
run: |
bash python/llm/test/run-llm-windows-tests.sh
env:
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
- name: Clean up
shell: bash
run: |
rm -rf models

View file

@@ -0,0 +1,46 @@
#!/bin/bash
#
# Runs the BigDL-LLM Windows unit tests:
#   1. downloads the test models from Hugging Face,
#   2. runs the model-conversion tests (bloom),
#   3. runs the inference tests (bloom completion / streaming).
#
# Required environment variables:
#   HF_TOKEN           - Hugging Face access token. Supply via the CI secret
#                        store; never commit a token to the repository.
#   ANALYTICS_ZOO_ROOT - absolute path to the repository root.
set -e

# SECURITY: a real access token used to be hardcoded here. Require the token
# (and the repo root) from the environment instead, failing fast with a clear
# message when either is missing.
: "${HF_TOKEN:?HF_TOKEN must be set in the environment (inject via CI secret)}"
: "${ANALYTICS_ZOO_ROOT:?ANALYTICS_ZOO_ROOT must be set}"
export HF_TOKEN ANALYTICS_ZOO_ROOT

export LLM_HOME="${ANALYTICS_ZOO_ROOT}/python/llm/src"
export BLOOM_ORIGIN_PATH="${ANALYTICS_ZOO_ROOT}/models/bloom-560m"
# export LLAMA_ORIGIN_PATH="${ANALYTICS_ZOO_ROOT}/models/llama-7b-hf"
export GPTNEOX_ORIGIN_PATH="${ANALYTICS_ZOO_ROOT}/models/redpajama-3b"
export INT4_CKPT_DIR="${ANALYTICS_ZOO_ROOT}/models/converted_models"
# export LLAMA_INT4_CKPT_PATH="${INT4_CKPT_DIR}/bigdl_llm_llama_7b_q4_0.bin"
export GPTNEOX_INT4_CKPT_PATH="${INT4_CKPT_DIR}/bigdl_llm_redpajama_q4_0.bin"
export BLOOM_INT4_CKPT_PATH="${INT4_CKPT_DIR}/bigdl_llm_bloom_q4_0.bin"

echo "# Download the models"
start=$(date "+%s")
echo "${ANALYTICS_ZOO_ROOT}"
python "${ANALYTICS_ZOO_ROOT}/python/llm/test/win/download_from_huggingface.py"
now=$(date "+%s")
elapsed=$((now - start))
echo "Models downloaded in:$elapsed seconds"

echo "# Start testing convert model"
start=$(date "+%s")
python -m pytest -s "${ANALYTICS_ZOO_ROOT}/python/llm/test/convert/test_convert_model.py" -k 'test_convert_bloom'
now=$(date "+%s")
elapsed=$((now - start))
echo "Bigdl-llm convert model test finished"
echo "Time used:$elapsed seconds"

echo "# Start testing inference"
start=$(date "+%s")
python -m pytest -s "${ANALYTICS_ZOO_ROOT}/python/llm/test/inference/test_call_models.py" -k 'test_bloom_completion_success or test_bloom_completion_with_stream_success'
now=$(date "+%s")
elapsed=$((now - start))
echo "Bigdl-llm inference test finished"
echo "Time used:$elapsed seconds"

View file

@@ -0,0 +1,37 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os

from huggingface_hub import login
from huggingface_hub import snapshot_download

if __name__ == '__main__':
    # Authenticate with Hugging Face before downloading. Without this guard,
    # an unset HF_TOKEN passes token=None to login(), which falls back to an
    # interactive prompt and hangs the CI job; fail fast with a clear message.
    access_token_read = os.environ.get('HF_TOKEN')
    if not access_token_read:
        raise SystemExit('HF_TOKEN environment variable must be set')
    login(token=access_token_read)

    # Download bigscience/bloom-560m as real files (local_dir_use_symlinks=False
    # — presumably because symlinks are unreliable on Windows CI runners; verify).
    # *.safetensors weights are skipped; the tests consume the .bin weights.
    snapshot_download(repo_id="bigscience/bloom-560m", local_dir="models/bloom-560m",
                      local_dir_use_symlinks=False, ignore_patterns="*.safetensors")
    # # Download decapoda-research/llama-7b-hf
    # snapshot_download(repo_id="decapoda-research/llama-7b-hf", local_dir="models/llama-7b-hf",
    #                   local_dir_use_symlinks=False, ignore_patterns="*.safetensors")
    # Download togethercomputer/RedPajama-INCITE-Chat-3B-v1
    # snapshot_download(repo_id="togethercomputer/RedPajama-INCITE-Chat-3B-v1", local_dir="models/redpajama-3b",
    #                   local_dir_use_symlinks=False, ignore_patterns="*.safetensors")