From 730d9ec8115df3ee1cecba10b440312513790d3d Mon Sep 17 00:00:00 2001
From: Ch1y0q
Date: Tue, 27 Aug 2024 13:35:24 +0800
Subject: [PATCH] Add Qwen2-audio example (#11835)

* add draft for qwen2-audio

* update example for `Qwen2-Audio`

* update

* update

* add warmup
---
 README.md                                     |   1 +
 .../Multimodal/qwen2-audio/README.md          | 127 ++++++++++++++++++
 .../Multimodal/qwen2-audio/generate.py        |  75 +++++++++++
 3 files changed, 203 insertions(+)
 create mode 100644 python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/README.md
 create mode 100644 python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/generate.py

diff --git a/README.md b/README.md
index f53052d9..3c767128 100644
--- a/README.md
+++ b/README.md
@@ -276,6 +276,7 @@ Over 50 models have been optimized/verified on `ipex-llm`, including *LLaMA/LLaM
| Qwen1.5 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen1.5) | [link](python/llm/example/GPU/HuggingFace/LLM/qwen1.5) |
| Qwen2 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen2) | [link](python/llm/example/GPU/HuggingFace/LLM/qwen2) |
| Qwen-VL | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/qwen-vl) | [link](python/llm/example/GPU/HuggingFace/Multimodal/qwen-vl) |
+| Qwen2-Audio | | [link](python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio) |
| Aquila | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/aquila) | [link](python/llm/example/GPU/HuggingFace/LLM/aquila) |
| Aquila2 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/aquila2) | [link](python/llm/example/GPU/HuggingFace/LLM/aquila2) |
| MOSS | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/moss) | |
diff --git a/python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/README.md b/python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/README.md
new file mode 100644
index 00000000..b201467a
--- /dev/null
+++ b/python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/README.md
@@ -0,0 +1,127 @@
# Qwen2-Audio
In this directory, you will find examples on how you could apply IPEX-LLM INT4 optimizations on Qwen2-Audio models on [Intel GPUs](../../../README.md). For illustration purposes, we utilize [Qwen/Qwen2-Audio-7B-Instruct](https://huggingface.co/Qwen/Qwen2-Audio-7B-Instruct) as a reference model.

## 0. Requirements
To run these examples with IPEX-LLM on Intel GPUs, we have some recommended requirements for your machine; please refer to [here](../../../README.md#requirements) for more information.


## Example: Predict Tokens using `generate()` API
In the example [generate.py](./generate.py), we show a basic use case for a Qwen2-Audio model: an input audio clip is prepared with the `processor` API and then passed to the model's `generate()` API to perform an English-to-Chinese translation, with IPEX-LLM INT4 optimizations on Intel GPUs.
### 1. Install

> [!NOTE]
> Qwen2-Audio requires a `transformers` version newer than the latest official release at the time of writing. For now, please install `transformers` from GitHub as shown in the installation commands below; once an official release that includes Qwen2-Audio support is available, you can install that version with `pip` instead.
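After completing the installation steps below, you can quickly confirm that your `transformers` build includes Qwen2-Audio support with a small import check (a minimal sketch; it simply verifies that the model class used by [generate.py](./generate.py) is importable):

```python
# Minimal sanity check: this import only succeeds on a `transformers`
# build that ships Qwen2-Audio support (e.g. a recent install from GitHub main).
try:
    from transformers import Qwen2AudioForConditionalGeneration  # noqa: F401
    print("Qwen2-Audio support is available.")
except ImportError:
    print("Qwen2-Audio support is missing; please upgrade `transformers`.")
```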
#### 1.1 Installation on Linux
We suggest using conda to manage the environment:
```bash
conda create -n llm python=3.11
conda activate llm
# the command below installs intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

pip install librosa
pip install git+https://github.com/huggingface/transformers
```

#### 1.2 Installation on Windows
We suggest using conda to manage the environment:
```bash
conda create -n llm python=3.11 libuv
conda activate llm

# the command below installs intel_extension_for_pytorch==2.1.10+xpu by default
pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/

pip install librosa
pip install git+https://github.com/huggingface/transformers
```

### 2. Configure oneAPI environment variables for Linux

> [!NOTE]
> Skip this step if you are running on Windows.

This step is required on Linux when oneAPI was installed via APT or the offline installer; skip it if oneAPI was installed via pip.

```bash
source /opt/intel/oneapi/setvars.sh
```

### 3. Runtime Configurations
For optimal performance, it is recommended to set several environment variables. Please check out the suggestions based on your device.
#### 3.1 Configurations for Linux
**For Intel Arc™ A-Series Graphics and Intel Data Center GPU Flex Series**

```bash
export USE_XETLA=OFF
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_CACHE_PERSISTENT=1
```
+ +
**For Intel Data Center GPU Max Series**

```bash
export LD_PRELOAD=${LD_PRELOAD}:${CONDA_PREFIX}/lib/libtcmalloc.so
export SYCL_PI_LEVEL_ZERO_USE_IMMEDIATE_COMMANDLISTS=1
export SYCL_CACHE_PERSISTENT=1
export ENABLE_SDP_FUSION=1
```
> Note: `libtcmalloc.so` can be installed with `conda install -c conda-forge -y gperftools=2.10`.
+ +
**For Intel iGPU**

```bash
export SYCL_CACHE_PERSISTENT=1
export BIGDL_LLM_XMX_DISABLED=1
```
+ +#### 3.2 Configurations for Windows +
**For Intel iGPU**

```cmd
set SYCL_CACHE_PERSISTENT=1
set BIGDL_LLM_XMX_DISABLED=1
```
+ +
**For Intel Arc™ A-Series Graphics**

```cmd
set SYCL_CACHE_PERSISTENT=1
```
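Regardless of OS, before running the example you can verify that your Intel GPU is visible to PyTorch (a minimal sanity check, assuming the conda environment from Section 1 is active):

```python
import torch
import intel_extension_for_pytorch as ipex  # registers the 'xpu' device with PyTorch

# Expect True, followed by the name(s) of your Intel GPU(s).
print(torch.xpu.is_available())
print([torch.xpu.get_device_name(i) for i in range(torch.xpu.device_count())])
```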
> [!NOTE]
> The first time each model runs on an Intel iGPU, Intel Arc™ A300-Series, or Pro A60, it may take several minutes to compile.
### 4. Running examples

```bash
python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH
```

Arguments info:
- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Qwen2-Audio model (e.g. `Qwen/Qwen2-Audio-7B-Instruct`) to be downloaded, or the path to the huggingface checkpoint folder. It defaults to `'Qwen/Qwen2-Audio-7B-Instruct'`.
- `--max-length MAX_LENGTH`: argument defining the maximum sequence length (prompt plus generated tokens) passed to `generate()`. It defaults to `256`.
- `--audio-url AUDIO_URL`: argument defining the URL of the input audio file. It defaults to an audio clip that asks the model to translate an English sentence into Chinese.

#### Sample Output
In `generate.py`, [an audio clip](https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/translate_to_chinese.wav) that asks the model to translate an English sentence into Chinese is used as the input. The response from the model is expected to be similar to:
```bash
['每个人都希望被赏识,所以如果你欣赏某人,不要保密。']
```
(The Chinese output roughly means: "Everyone wants to be appreciated, so if you appreciate someone, don't keep it a secret.")
diff --git a/python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/generate.py b/python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/generate.py
new file mode 100644
index 00000000..fd186f3e
--- /dev/null
+++ b/python/llm/example/GPU/HuggingFace/Multimodal/qwen2-audio/generate.py
@@ -0,0 +1,75 @@
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+# + +import argparse +from io import BytesIO +from urllib.request import urlopen +import librosa +import torch +from transformers import Qwen2AudioForConditionalGeneration, AutoProcessor +from ipex_llm import optimize_model + +def main(args): + model_path = args.repo_id_or_model_path + max_length = args.max_length + audio_url = args.audio_url + + processor = AutoProcessor.from_pretrained(model_path) + model = Qwen2AudioForConditionalGeneration.from_pretrained(model_path) + model = optimize_model(model, low_bit='sym_int4', optimize_llm=True) + model = model.half().to('xpu') + + conversation = [ + {"role": "user", "content": [ + {"type": "audio", "audio_url": audio_url}, + ]}, + ] + text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False) + audios = [] + for message in conversation: + if isinstance(message["content"], list): + for ele in message["content"]: + if ele["type"] == "audio": + audios.append(librosa.load( + BytesIO(urlopen(ele['audio_url']).read()), + sr=processor.feature_extractor.sampling_rate)[0] + ) + + inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True) + inputs = inputs.to('xpu') + + with torch.inference_mode(): + generate_ids = model.generate(**inputs, max_length=max_length) # warmup + import time + st = time.time() + generate_ids = model.generate(**inputs, max_length=max_length) + generate_ids = generate_ids[:, inputs.input_ids.size(1):] + et = time.time() + print(f'Inference time: {et-st} s') + + response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False) + print(response) + +if __name__=="__main__": + parser = argparse.ArgumentParser(description="Qwen2-Audio") + parser.add_argument('--repo-id-or-model-path', type=str, default="Qwen/Qwen2-Audio-7B-Instruct", + help='The huggingface repo id for the Qwen2-Audio model checkpoint') + parser.add_argument('--max-length', type=int, default=256, + help='The max length of input text') + parser.add_argument('--audio-url', type=str, default="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/translate_to_chinese.wav", + help='The URL to the input audio file') + args = parser.parse_args() + main(args)