diff --git a/.github/workflows/llm_unit_tests.yml b/.github/workflows/llm_unit_tests.yml index 69a91566..905ba5a3 100644 --- a/.github/workflows/llm_unit_tests.yml +++ b/.github/workflows/llm_unit_tests.yml @@ -76,6 +76,7 @@ jobs: shell: bash run: | echo "SPEECH_DATASET_PATH=${DATASET_DIR}/librispeech_asr_dummy" >> "$GITHUB_ENV" + echo "COMMON_VOICE_PATH=${DATASET_DIR}/common_voice" >> "$GITHUB_ENV" echo "LLAMA_ORIGIN_PATH=${ORIGIN_DIR}/llama-7b-hf" >> "$GITHUB_ENV" echo "BLOOM_ORIGIN_PATH=${ORIGIN_DIR}/bloom-7b1" >> "$GITHUB_ENV" @@ -160,6 +161,11 @@ jobs: echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR" wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/librispeech_asr_dummy -P $DATASET_DIR fi + if [ ! -d $COMMON_VOICE_PATH ]; then + echo "Directory $COMMON_VOICE_PATH not found. Downloading from FTP server..." + echo "wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/common_voice -P $DATASET_DIR" + wget -r -nH --no-verbose --cut-dirs=2 $LLM_FTP_URL/llm/datasets/common_voice -P $DATASET_DIR + fi - name: Run LLM cli test (Linux) if: runner.os == 'Linux' @@ -167,13 +173,11 @@ jobs: - name: Run LLM cli test (Windows) if: runner.os == 'Windows' uses: ./.github/actions/llm/cli-test-windows - - name: Run LLM inference test shell: bash run: | - python -m pip install einops datasets librosa + python -m pip install einops datasets librosa openai-whisper bash python/llm/test/run-llm-inference-tests.sh - - name: Run LLM langchain test shell: bash run: | diff --git a/python/llm/src/bigdl/llm/__init__.py b/python/llm/src/bigdl/llm/__init__.py index a9c71a16..815be311 100644 --- a/python/llm/src/bigdl/llm/__init__.py +++ b/python/llm/src/bigdl/llm/__init__.py @@ -20,3 +20,4 @@ # only search the first bigdl package and end up finding only one sub-package. 
from .convert_model import llm_convert +from .optimize import optimize_model diff --git a/python/llm/src/bigdl/llm/optimize.py b/python/llm/src/bigdl/llm/optimize.py new file mode 100644 index 00000000..334ad0e9 --- /dev/null +++ b/python/llm/src/bigdl/llm/optimize.py @@ -0,0 +1,37 @@ +# +# Copyright 2016 The BigDL Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .transformers import ggml_convert_quant +from bigdl.llm.ggml.quantize import ggml_tensor_qtype +from bigdl.llm.utils.common import invalidInputError + + +def optimize_model(model, low_bit='sym_int4', optimize_llm=True): +    """ +    A method to optimize any pytorch models. + +    :param model: The original PyTorch model (nn.Module) +    :param low_bit: Supported low-bit options are "sym_int4", "asym_int4", "sym_int5", +                    "asym_int5" or "sym_int8". +    :param optimize_llm: Whether to further optimize llm model. + +    :return: The optimized model. +    """ +    invalidInputError(low_bit in ggml_tensor_qtype, +                      f"Unknown low_bit value: {low_bit}, expected:" +                      f" sym_int4, asym_int4, sym_int5, asym_int5 or sym_int8.") +    qtype = ggml_tensor_qtype[low_bit] +    return ggml_convert_quant(model, qtype=qtype, optimize_model=optimize_llm) diff --git a/python/llm/test/inference/test_optimize.py b/python/llm/test/inference/test_optimize.py new file mode 100644 index 00000000..ff1269e5 --- /dev/null +++ b/python/llm/test/inference/test_optimize.py @@ -0,0 +1,44 @@ +# +# Copyright 2016 The BigDL Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import unittest +import os +import pytest +import time +import torch +from bigdl.llm import optimize_model + +class TestOptimizeAPI(unittest.TestCase): + +    def setUp(self): +        thread_num = os.environ.get('THREAD_NUM') +        if thread_num is not None: +            self.n_threads = int(thread_num) +        else: +            self.n_threads = 2 + +    def test_optimize_whisper(self): +        # dataset_path = os.environ.get('COMMON_VOICE_PATH') +        # reservation_audio = os.path.join(dataset_path,'reservation.mp3') +        import whisper +        model = whisper.load_model("tiny") +        model = optimize_model(model, low_bit="sym_int4", optimize_llm=False) +        # result = model.transcribe(reservation_audio, verbose=True, language="English") +        # assert "Reservation" in result["text"] or "reservation" in result["text"] + + +if __name__ == '__main__': +    pytest.main([__file__]) \ No newline at end of file