From 1de13ea57801b9d6e339ac05ad6484588b122424 Mon Sep 17 00:00:00 2001 From: "Wang, Jian4" <61138589+hzjane@users.noreply.github.com> Date: Mon, 18 Mar 2024 10:45:14 +0800 Subject: [PATCH] LLM: remove CPU english_quotes dataset and update docker example (#10399) * update dataset * update readme * update docker cpu * update xpu docker --- docker/llm/README.md | 30 +++---- .../llm/finetune/qlora/cpu/docker/Dockerfile | 3 + .../llm/finetune/qlora/cpu/docker/README.md | 62 ++++--------- .../qlora/cpu/docker/qlora_finetuning_cpu.py | 87 ------------------- .../docker/start-qlora-finetuning-on-cpu.sh | 4 +- .../llm/finetune/qlora/xpu/docker/Dockerfile | 3 + .../llm/finetune/qlora/xpu/docker/README.md | 30 +++---- .../docker/start-qlora-finetuning-on-xpu.sh | 4 +- .../example/CPU/QLoRA-FineTuning/README.md | 52 +++-------- .../QLoRA-FineTuning/qlora_finetuning_cpu.py | 34 +++++--- 10 files changed, 92 insertions(+), 217 deletions(-) delete mode 100644 docker/llm/finetune/qlora/cpu/docker/qlora_finetuning_cpu.py diff --git a/docker/llm/README.md b/docker/llm/README.md index e6c3aa53..2460077b 100644 --- a/docker/llm/README.md +++ b/docker/llm/README.md @@ -467,7 +467,7 @@ docker build \ ### 2. Prepare Base Model, Data and Container -Here, we try to fine-tune a [Llama2-7b](https://huggingface.co/meta-llama/Llama-2-7b) with [English Quotes](https://huggingface.co/datasets/Abirate/english_quotes) dataset, and please download them and start a docker container with files mounted like below: +Here, we try to fine-tune a [Llama2-7b](https://huggingface.co/meta-llama/Llama-2-7b) with [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned) dataset, and please download them and start a docker container with files mounted like below: ```bash export BASE_MODE_PATH=your_downloaded_base_model_path @@ -483,7 +483,7 @@ docker run -itd \ -e http_proxy=${HTTP_PROXY} \ -e https_proxy=${HTTPS_PROXY} \ -v $BASE_MODE_PATH:/model \ - -v $DATA_PATH:/data/english_quotes \ + -v $DATA_PATH:/data/alpaca-cleaned \ --shm-size="16g" \ intelanalytics/bigdl-llm-fintune-qlora-xpu:2.5.0-SNAPSHOT ``` @@ -524,17 +524,17 @@ bash start-qlora-finetuning-on-xpu.sh After minutes, it is expected to get results like: ```bash -{'loss': 2.256, 'learning_rate': 0.0002, 'epoch': 0.03} -{'loss': 1.8869, 'learning_rate': 0.00017777777777777779, 'epoch': 0.06} -{'loss': 1.5334, 'learning_rate': 0.00015555555555555556, 'epoch': 0.1} -{'loss': 1.4975, 'learning_rate': 0.00013333333333333334, 'epoch': 0.13} -{'loss': 1.3245, 'learning_rate': 0.00011111111111111112, 'epoch': 0.16} -{'loss': 1.2622, 'learning_rate': 8.888888888888889e-05, 'epoch': 0.19} -{'loss': 1.3944, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.22} -{'loss': 1.2481, 'learning_rate': 4.4444444444444447e-05, 'epoch': 0.26} -{'loss': 1.3442, 'learning_rate': 2.2222222222222223e-05, 'epoch': 0.29} -{'loss': 1.3256, 'learning_rate': 0.0, 'epoch': 0.32} -{'train_runtime': 204.4633, 'train_samples_per_second': 3.913, 'train_steps_per_second': 0.978, 'train_loss': 1.5072882556915284, 'epoch': 0.32} -100%|██████████████████████████████████████████████████████████████████████████████████████| 200/200 [03:24<00:00, 1.02s/it] -TrainOutput(global_step=200, training_loss=1.5072882556915284, metrics={'train_runtime': 204.4633, 'train_samples_per_second': 3.913, 'train_steps_per_second': 0.978, 'train_loss': 1.5072882556915284, 'epoch': 0.32}) +{'loss': 2.0251, 'learning_rate': 0.0002, 'epoch': 0.02} +{'loss': 1.2389, 'learning_rate': 0.00017777777777777779, 'epoch': 
0.03} +{'loss': 1.032, 'learning_rate': 0.00015555555555555556, 'epoch': 0.05} +{'loss': 0.9141, 'learning_rate': 0.00013333333333333334, 'epoch': 0.06} +{'loss': 0.8505, 'learning_rate': 0.00011111111111111112, 'epoch': 0.08} +{'loss': 0.8713, 'learning_rate': 8.888888888888889e-05, 'epoch': 0.09} +{'loss': 0.8635, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.11} +{'loss': 0.8853, 'learning_rate': 4.4444444444444447e-05, 'epoch': 0.12} +{'loss': 0.859, 'learning_rate': 2.2222222222222223e-05, 'epoch': 0.14} +{'loss': 0.8608, 'learning_rate': 0.0, 'epoch': 0.15} +{'train_runtime': xxxx, 'train_samples_per_second': xxxx, 'train_steps_per_second': xxxx, 'train_loss': 1.0400420665740966, 'epoch': 0.15} +100%|███████████████████████████████████████████████████████████████████████████████████| 200/200 [07:16<00:00, 2.18s/it] +TrainOutput(global_step=200, training_loss=1.0400420665740966, metrics={'train_runtime': xxxx, 'train_samples_per_second': xxxx, 'train_steps_per_second': xxxx, 'train_loss': 1.0400420665740966, 'epoch': 0.15}) ``` diff --git a/docker/llm/finetune/qlora/cpu/docker/Dockerfile b/docker/llm/finetune/qlora/cpu/docker/Dockerfile index f47a4c88..68db2af6 100644 --- a/docker/llm/finetune/qlora/cpu/docker/Dockerfile +++ b/docker/llm/finetune/qlora/cpu/docker/Dockerfile @@ -36,6 +36,7 @@ RUN mkdir -p /bigdl/data && mkdir -p /bigdl/model && \ pip install datasets transformers==4.35.0 && \ pip install fire peft==0.5.0 && \ pip install accelerate==0.23.0 && \ + pip install bitsandbytes && \ # install basic dependencies apt-get update && apt-get install -y curl wget gpg gpg-agent software-properties-common libunwind8-dev && \ # get qlora example code @@ -43,6 +44,8 @@ RUN mkdir -p /bigdl/data && mkdir -p /bigdl/model && \ cd /bigdl && \ git clone https://github.com/intel-analytics/BigDL.git && \ mv BigDL/python/llm/example/CPU/QLoRA-FineTuning/* . && \ + mkdir -p /GPU/LLM-Finetuning && \ + mv BigDL/python/llm/example/GPU/LLM-Finetuning/common /GPU/LLM-Finetuning/common && \ rm -r BigDL && \ chown -R mpiuser /bigdl diff --git a/docker/llm/finetune/qlora/cpu/docker/README.md b/docker/llm/finetune/qlora/cpu/docker/README.md index 665f5829..f508f00a 100644 --- a/docker/llm/finetune/qlora/cpu/docker/README.md +++ b/docker/llm/finetune/qlora/cpu/docker/README.md @@ -40,7 +40,7 @@ docker build \ ### 2. 
Prepare Base Model, Data and Container -Here, we try to fine-tune a [Llama2-7b](https://huggingface.co/meta-llama/Llama-2-7b) with [English Quotes](https://huggingface.co/datasets/Abirate/english_quotes) dataset, and please download them and start a docker container with files mounted like below: +Here, we try to fine-tune a [Llama2-7b](https://huggingface.co/meta-llama/Llama-2-7b) with [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned) dataset, and please download them and start a docker container with files mounted like below: ```bash export BASE_MODE_PATH=your_downloaded_base_model_path @@ -54,7 +54,7 @@ docker run -itd \ -e http_proxy=${HTTP_PROXY} \ -e https_proxy=${HTTPS_PROXY} \ -v $BASE_MODE_PATH:/bigdl/model \ - -v $DATA_PATH:/bigdl/data/english_quotes \ + -v $DATA_PATH:/bigdl/data/alpaca-cleaned \ intelanalytics/bigdl-llm-finetune-qlora-cpu-standalone:2.5.0-SNAPSHOT ``` @@ -92,19 +92,19 @@ bash start-qlora-finetuning-on-cpu.sh After minutes, it is expected to get results like: ```bash -{'loss': 2.256, 'learning_rate': 0.0002, 'epoch': 0.03} -{'loss': 1.8869, 'learning_rate': 0.00017777777777777779, 'epoch': 0.06} -{'loss': 1.5334, 'learning_rate': 0.00015555555555555556, 'epoch': 0.1} -{'loss': 1.4975, 'learning_rate': 0.00013333333333333334, 'epoch': 0.13} -{'loss': 1.3245, 'learning_rate': 0.00011111111111111112, 'epoch': 0.16} -{'loss': 1.2622, 'learning_rate': 8.888888888888889e-05, 'epoch': 0.19} -{'loss': 1.3944, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.22} -{'loss': 1.2481, 'learning_rate': 4.4444444444444447e-05, 'epoch': 0.26} -{'loss': 1.3442, 'learning_rate': 2.2222222222222223e-05, 'epoch': 0.29} -{'loss': 1.3256, 'learning_rate': 0.0, 'epoch': 0.32} -{'train_runtime': xxx, 'train_samples_per_second': xxx, 'train_steps_per_second': xxx, 'train_loss': 1.5072882556915284, 'epoch': 0.32} -100%|██████████████████████████████████████████████████████████████████████████████████████| 200/200 [xx:xx [?]` to verify. For example, using `“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: ` to inference. -BigDL-LLM llama2 example [link](https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/CPU/HF-Transformers-AutoModels/Model/llama2). Update the `LLAMA2_PROMPT_FORMAT = "{prompt}"`. - -```bash -python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt "“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->:" --n-predict 20 -``` - -#### Sample Output - -Base_model output - -```log -Inference time: xxx s --------------------- Prompt -------------------- -“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: --------------------- Output -------------------- -“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: 💻 Fine-tuning a language model on a powerful device like an Intel CPU -``` - -Merged_model output - -```log -Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
-Inference time: xxx s
--------------------- Prompt --------------------
-“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->:
--------------------- Output --------------------
-“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: ['bigdl'] ['deep-learning'] ['distributed-computing'] ['intel'] ['optimization'] ['training'] ['training-speed']
-```
-
 ### 4. Start Multi-Process Fine-Tuning in One Docker
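
As background for the dataset switch in the README sections above: `yahma/alpaca-cleaned` records are instruction/input/output triples, and the shared `Prompter("alpaca")` / `get_train_val_data` helpers that this patch wires in (via `GPU/LLM-Finetuning/common`, see the Dockerfile change above and the script change at the end of the patch) render each record into an Alpaca-style prompt before tokenization. The following is a minimal, standalone sketch of that rendering step, assuming the standard alpaca-cleaned field names and the common Alpaca template wording; it is illustrative only and not part of the patched files.

```python
# Illustrative sketch: how one alpaca-cleaned record becomes a training prompt.
# Field names (instruction/input/output) follow the yahma/alpaca-cleaned schema;
# BigDL's Prompter/get_train_val_data helpers additionally handle tokenization,
# truncation to cutoff_len, and the train/validation split.

TEMPLATE_WITH_INPUT = (
    "Below is an instruction that describes a task, paired with an input that "
    "provides further context. Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
)
TEMPLATE_NO_INPUT = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response:\n"
)

def build_prompt(record: dict) -> str:
    """Render one record into the full text the model is trained on."""
    if record.get("input"):
        prompt = TEMPLATE_WITH_INPUT.format(instruction=record["instruction"],
                                            input=record["input"])
    else:
        prompt = TEMPLATE_NO_INPUT.format(instruction=record["instruction"])
    return prompt + record["output"]

if __name__ == "__main__":
    sample = {"instruction": "Give three tips for staying healthy.",
              "input": "",
              "output": "1. Eat a balanced diet. 2. Exercise regularly. 3. Get enough sleep."}
    print(build_prompt(sample))
```

Because the updated `qlora_finetuning_cpu.py` passes `train_on_inputs=True`, the loss is computed over the full rendered string (prompt plus response) rather than the response alone.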
diff --git a/docker/llm/finetune/qlora/cpu/docker/qlora_finetuning_cpu.py b/docker/llm/finetune/qlora/cpu/docker/qlora_finetuning_cpu.py deleted file mode 100644 index d7d24c39..00000000 --- a/docker/llm/finetune/qlora/cpu/docker/qlora_finetuning_cpu.py +++ /dev/null @@ -1,87 +0,0 @@ -# -# Copyright 2016 The BigDL Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -import torch -import os - -import transformers -from transformers import LlamaTokenizer - -from peft import LoraConfig -from bigdl.llm.transformers.qlora import get_peft_model, prepare_model_for_kbit_training -from bigdl.llm.transformers import AutoModelForCausalLM -from datasets import load_dataset -import argparse - -if __name__ == "__main__": - - parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama2 model') - parser.add_argument('--repo-id-or-model-path', type=str, default="meta-llama/Llama-2-7b-hf", - help='The huggingface repo id for the Llama2 (e.g. `meta-llama/Llama-2-7b-hf` and `meta-llama/Llama-2-13b-chat-hf`) to be downloaded' - ', or the path to the huggingface checkpoint folder') - parser.add_argument('--dataset', type=str, default="Abirate/english_quotes") - - args = parser.parse_args() - model_path = args.repo_id_or_model_path - dataset_path = args.dataset - tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True) - - data = load_dataset(dataset_path) - def merge(row): - row['prediction'] = row['quote'] + ' ->: ' + str(row['tags']) - return row - data = data.map(lambda samples: tokenizer(samples["prediction"]), batched=True) - model = AutoModelForCausalLM.from_pretrained(model_path, - load_in_low_bit="sym_int4", - optimize_model=False, - torch_dtype=torch.float16, - modules_to_not_convert=["lm_head"],) - model = model.to('cpu') - # model.gradient_checkpointing_enable() - model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=False) - model.enable_input_require_grads() - config = LoraConfig( - r=8, - lora_alpha=32, - target_modules=["q_proj", "k_proj", "v_proj"], - lora_dropout=0.05, - bias="none", - task_type="CAUSAL_LM" - ) - model = get_peft_model(model, config) - tokenizer.pad_token_id = 0 - tokenizer.padding_side = "left" - trainer = transformers.Trainer( - model=model, - train_dataset=data["train"], - args=transformers.TrainingArguments( - per_device_train_batch_size=4, - gradient_accumulation_steps= 1, - warmup_steps=20, - max_steps=200, - learning_rate=2e-4, - save_steps=100, - bf16=True, - logging_steps=20, - output_dir="outputs", - optim="adamw_hf", # paged_adamw_8bit is not supported yet - # gradient_checkpointing=True, # can further reduce memory but slower - ), - data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), - ) - model.config.use_cache = False # silence the warnings. Please re-enable for inference! 
- result = trainer.train() - print(result) diff --git a/docker/llm/finetune/qlora/cpu/docker/start-qlora-finetuning-on-cpu.sh b/docker/llm/finetune/qlora/cpu/docker/start-qlora-finetuning-on-cpu.sh index 32da28e4..0a428334 100644 --- a/docker/llm/finetune/qlora/cpu/docker/start-qlora-finetuning-on-cpu.sh +++ b/docker/llm/finetune/qlora/cpu/docker/start-qlora-finetuning-on-cpu.sh @@ -11,9 +11,9 @@ then MODEL_PARAM="--repo-id-or-model-path ./model" # otherwise, default to download from HF repo fi -if [ -d "./data/english_quotes" ]; +if [ -d "./data/alpaca-cleaned" ]; then - DATA_PARAM="--dataset ./data/english_quotes" # otherwise, default to download from HF dataset + DATA_PARAM="--dataset ./data/alpaca-cleaned" # otherwise, default to download from HF dataset fi if [ "$STANDALONE_DOCKER" = "TRUE" ] diff --git a/docker/llm/finetune/qlora/xpu/docker/Dockerfile b/docker/llm/finetune/qlora/xpu/docker/Dockerfile index 57676df9..415581ad 100644 --- a/docker/llm/finetune/qlora/xpu/docker/Dockerfile +++ b/docker/llm/finetune/qlora/xpu/docker/Dockerfile @@ -34,6 +34,9 @@ RUN curl -fsSL https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-P pip install git+https://github.com/huggingface/transformers.git@${TRANSFORMERS_COMMIT_ID} && \ pip install peft==0.5.0 datasets accelerate==0.23.0 && \ pip install bitsandbytes scipy && \ + git clone https://github.com/intel-analytics/BigDL.git && \ + mv BigDL/python/llm/example/GPU/LLM-Finetuning/common /common && \ + rm -r BigDL && \ wget https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/example/GPU/LLM-Finetuning/QLoRA/simple-example/qlora_finetuning.py COPY ./start-qlora-finetuning-on-xpu.sh /start-qlora-finetuning-on-xpu.sh diff --git a/docker/llm/finetune/qlora/xpu/docker/README.md b/docker/llm/finetune/qlora/xpu/docker/README.md index 1371a513..13e5fbab 100644 --- a/docker/llm/finetune/qlora/xpu/docker/README.md +++ b/docker/llm/finetune/qlora/xpu/docker/README.md @@ -25,7 +25,7 @@ docker build \ ### 2. 
Prepare Base Model, Data and Container -Here, we try to fine-tune a [Llama2-7b](https://huggingface.co/meta-llama/Llama-2-7b) with [English Quotes](https://huggingface.co/datasets/Abirate/english_quotes) dataset, and please download them and start a docker container with files mounted like below: +Here, we try to fine-tune a [Llama2-7b](https://huggingface.co/meta-llama/Llama-2-7b) with [yahma/alpaca-cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned) dataset, and please download them and start a docker container with files mounted like below: ```bash export BASE_MODE_PATH=your_downloaded_base_model_path @@ -41,7 +41,7 @@ docker run -itd \ -e http_proxy=${HTTP_PROXY} \ -e https_proxy=${HTTPS_PROXY} \ -v $BASE_MODE_PATH:/model \ - -v $DATA_PATH:/data/english_quotes \ + -v $DATA_PATH:/data/alpaca-cleaned \ --shm-size="16g" \ intelanalytics/bigdl-llm-fintune-qlora-xpu:2.5.0-SNAPSHOT ``` @@ -82,17 +82,17 @@ bash start-qlora-finetuning-on-xpu.sh After minutes, it is expected to get results like: ```bash -{'loss': 2.256, 'learning_rate': 0.0002, 'epoch': 0.03} -{'loss': 1.8869, 'learning_rate': 0.00017777777777777779, 'epoch': 0.06} -{'loss': 1.5334, 'learning_rate': 0.00015555555555555556, 'epoch': 0.1} -{'loss': 1.4975, 'learning_rate': 0.00013333333333333334, 'epoch': 0.13} -{'loss': 1.3245, 'learning_rate': 0.00011111111111111112, 'epoch': 0.16} -{'loss': 1.2622, 'learning_rate': 8.888888888888889e-05, 'epoch': 0.19} -{'loss': 1.3944, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.22} -{'loss': 1.2481, 'learning_rate': 4.4444444444444447e-05, 'epoch': 0.26} -{'loss': 1.3442, 'learning_rate': 2.2222222222222223e-05, 'epoch': 0.29} -{'loss': 1.3256, 'learning_rate': 0.0, 'epoch': 0.32} -{'train_runtime': 204.4633, 'train_samples_per_second': 3.913, 'train_steps_per_second': 0.978, 'train_loss': 1.5072882556915284, 'epoch': 0.32} -100%|██████████████████████████████████████████████████████████████████████████████████████| 200/200 [03:24<00:00, 1.02s/it] -TrainOutput(global_step=200, training_loss=1.5072882556915284, metrics={'train_runtime': 204.4633, 'train_samples_per_second': 3.913, 'train_steps_per_second': 0.978, 'train_loss': 1.5072882556915284, 'epoch': 0.32}) +{'loss': 2.0251, 'learning_rate': 0.0002, 'epoch': 0.02} +{'loss': 1.2389, 'learning_rate': 0.00017777777777777779, 'epoch': 0.03} +{'loss': 1.032, 'learning_rate': 0.00015555555555555556, 'epoch': 0.05} +{'loss': 0.9141, 'learning_rate': 0.00013333333333333334, 'epoch': 0.06} +{'loss': 0.8505, 'learning_rate': 0.00011111111111111112, 'epoch': 0.08} +{'loss': 0.8713, 'learning_rate': 8.888888888888889e-05, 'epoch': 0.09} +{'loss': 0.8635, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.11} +{'loss': 0.8853, 'learning_rate': 4.4444444444444447e-05, 'epoch': 0.12} +{'loss': 0.859, 'learning_rate': 2.2222222222222223e-05, 'epoch': 0.14} +{'loss': 0.8608, 'learning_rate': 0.0, 'epoch': 0.15} +{'train_runtime': xxxx, 'train_samples_per_second': xxxx, 'train_steps_per_second': xxxx, 'train_loss': 1.0400420665740966, 'epoch': 0.15} +100%|███████████████████████████████████████████████████████████████████████████████████| 200/200 [07:16<00:00, 2.18s/it] +TrainOutput(global_step=200, training_loss=1.0400420665740966, metrics={'train_runtime': xxxx, 'train_samples_per_second': xxxx, 'train_steps_per_second': xxxx, 'train_loss': 1.0400420665740966, 'epoch': 0.15}) ``` diff --git a/docker/llm/finetune/qlora/xpu/docker/start-qlora-finetuning-on-xpu.sh b/docker/llm/finetune/qlora/xpu/docker/start-qlora-finetuning-on-xpu.sh 
index 06ffb527..bdc2741b 100644 --- a/docker/llm/finetune/qlora/xpu/docker/start-qlora-finetuning-on-xpu.sh +++ b/docker/llm/finetune/qlora/xpu/docker/start-qlora-finetuning-on-xpu.sh @@ -9,9 +9,9 @@ then MODEL_PARAM="--repo-id-or-model-path ./model" # otherwise, default to download from HF repo fi -if [ -d "./data/english_quotes" ]; +if [ -d "./data/alpaca-cleaned" ]; then - DATA_PARAM="--dataset ./data/english_quotes" # otherwise, default to download from HF dataset + DATA_PARAM="--dataset ./data/alpaca-cleaned" # otherwise, default to download from HF dataset fi python qlora_finetuning.py $MODEL_PARAM $DATA_PARAM diff --git a/python/llm/example/CPU/QLoRA-FineTuning/README.md b/python/llm/example/CPU/QLoRA-FineTuning/README.md index 6868ee91..f8296e40 100644 --- a/python/llm/example/CPU/QLoRA-FineTuning/README.md +++ b/python/llm/example/CPU/QLoRA-FineTuning/README.md @@ -38,19 +38,19 @@ python ./qlora_finetuning_cpu.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH - #### Sample Output ```log -{'loss': 2.5668, 'learning_rate': 0.0002, 'epoch': 0.03} -{'loss': 1.6988, 'learning_rate': 0.00017777777777777779, 'epoch': 0.06} -{'loss': 1.3073, 'learning_rate': 0.00015555555555555556, 'epoch': 0.1} -{'loss': 1.3495, 'learning_rate': 0.00013333333333333334, 'epoch': 0.13} -{'loss': 1.1746, 'learning_rate': 0.00011111111111111112, 'epoch': 0.16} -{'loss': 1.0794, 'learning_rate': 8.888888888888889e-05, 'epoch': 0.19} -{'loss': 1.2214, 'learning_rate': 6.666666666666667e-05, 'epoch': 0.22} -{'loss': 1.1698, 'learning_rate': 4.4444444444444447e-05, 'epoch': 0.26} -{'loss': 1.2044, 'learning_rate': 2.2222222222222223e-05, 'epoch': 0.29} -{'loss': 1.1516, 'learning_rate': 0.0, 'epoch': 0.32} -{'train_runtime': xxx, 'train_samples_per_second': xxx, 'train_steps_per_second': xxx, 'train_loss': 1.3923714351654053, 'epoch': 0.32} -100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 200/200 [xx:xx [?]` to verify. For example, using `“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: ` to inference. -BigDL-LLM llama2 example [link](https://github.com/intel-analytics/BigDL/tree/main/python/llm/example/CPU/HF-Transformers-AutoModels/Model/llama2). Update the `LLAMA2_PROMPT_FORMAT = "{prompt}"`. -```bash -python ./generate.py --repo-id-or-model-path REPO_ID_OR_MODEL_PATH --prompt "“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->:" --n-predict 20 -``` - -#### Sample Output -Base_model output -```log -Inference time: xxx s --------------------- Prompt -------------------- -“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: --------------------- Output -------------------- -“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: 💻 Fine-tuning a language model on a powerful device like an Intel CPU -``` -Merged_model output -```log -Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
-Inference time: xxx s --------------------- Prompt -------------------- -“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: --------------------- Output -------------------- -“QLoRA fine-tuning using BigDL-LLM 4bit optimizations on Intel CPU is Efficient and convenient” ->: ['bigdl'] ['deep-learning'] ['distributed-computing'] ['intel'] ['optimization'] ['training'] ['training-speed'] -``` diff --git a/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py b/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py index d0d47152..1a8c6054 100644 --- a/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py +++ b/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py @@ -27,25 +27,37 @@ from datasets import load_dataset import argparse from bigdl.llm.utils.isa_checker import ISAChecker +current_dir = os.path.dirname(os.path.realpath(__file__)) +common_util_path = os.path.join(current_dir, '..', '..', 'GPU', 'LLM-Finetuning') +import sys +sys.path.append(common_util_path) +from common.utils import Prompter, get_train_val_data + if __name__ == "__main__": parser = argparse.ArgumentParser(description='Predict Tokens using `generate()` API for Llama2 model') parser.add_argument('--repo-id-or-model-path', type=str, default="meta-llama/Llama-2-7b-hf", help='The huggingface repo id for the Llama2 (e.g. `meta-llama/Llama-2-7b-hf` and `meta-llama/Llama-2-13b-chat-hf`) to be downloaded' ', or the path to the huggingface checkpoint folder') - parser.add_argument('--dataset', type=str, default="Abirate/english_quotes") + parser.add_argument('--dataset', type=str, default="yahma/alpaca-cleaned") args = parser.parse_args() model_path = args.repo_id_or_model_path dataset_path = args.dataset tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True) - data = load_dataset(dataset_path) - def merge(row): - row['prediction'] = row['quote'] + ' ->: ' + str(row['tags']) - return row - data['train'] = data['train'].map(merge) - # use the max_length to reduce memory usage, should be adjusted by different datasets - data = data.map(lambda samples: tokenizer(samples["prediction"], max_length=256), batched=True) + if dataset_path.endswith(".json") or dataset_path.endswith(".jsonl"): + data = load_dataset("json", data_files=dataset_path) + else: + data = load_dataset(dataset_path) + + # For illustration purpose, only use part of data to train + data = data["train"].train_test_split(train_size=0.1, shuffle=False) + + # Data processing + prompter = Prompter("alpaca") + train_data, _ = get_train_val_data(data, tokenizer, prompter, train_on_inputs=True, + add_eos_token=False, cutoff_len=256, val_set_size=0, seed=42) + bnb_config = BitsAndBytesConfig( load_in_4bit=True, @@ -85,7 +97,7 @@ if __name__ == "__main__": trainer = transformers.Trainer( model=model, - train_dataset=data["train"], + train_dataset=train_data, args=transformers.TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=1, @@ -100,7 +112,9 @@ if __name__ == "__main__": # gradient_checkpointing=True, # can further reduce memory but slower ), # Inputs are dynamically padded to the maximum length of a batch - data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), + data_collator=transformers.DataCollatorForSeq2Seq( + tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True + ), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! result = trainer.train()
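
A note on the collator change at the end of `qlora_finetuning_cpu.py`: `DataCollatorForSeq2Seq` pads `input_ids`, `attention_mask`, and `labels` per batch to the longest example (rounded up to a multiple of 8), filling padded label positions with `-100` so they are ignored by the loss. That suits Alpaca-style instruction samples of widely varying length better than the previous `DataCollatorForLanguageModeling(tokenizer, mlm=False)`, which rebuilds labels from the padded `input_ids` instead of keeping the labels produced during preprocessing. The sketch below illustrates the padding behaviour; the small public tokenizer checkpoint is a stand-in assumption for the example only, while the real script loads the Llama-2 tokenizer from `--repo-id-or-model-path`.

```python
from transformers import AutoTokenizer, DataCollatorForSeq2Seq

# Stand-in tokenizer for illustration (assumed available on the Hugging Face Hub);
# the actual script uses the Llama-2 tokenizer passed via --repo-id-or-model-path.
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer")
tokenizer.pad_token_id = 0          # same settings as in qlora_finetuning_cpu.py
tokenizer.padding_side = "left"

# Two examples of very different lengths, shaped like the tokenized training data.
texts = [
    "Short instruction.",
    "A much longer instruction that produces quite a few more tokens than the first one.",
]
features = []
for text in texts:
    ids = tokenizer(text)["input_ids"]
    features.append({"input_ids": ids,
                     "attention_mask": [1] * len(ids),
                     "labels": list(ids)})

# Same collator configuration as in the patched training script.
collator = DataCollatorForSeq2Seq(tokenizer, pad_to_multiple_of=8,
                                  return_tensors="pt", padding=True)
batch = collator(features)

print(batch["input_ids"].shape)  # both rows padded to the same multiple-of-8 length
print(batch["labels"][0])        # padded label positions are filled with -100
```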