diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
index 4a4ffa9e..55675e28 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
@@ -142,6 +142,45 @@ bash qlora_finetune_llama3_8b_arc_1_card.sh
+
+<details>
+  <summary> Show ChatGLM3-6B examples </summary>
+
+##### Finetuning ChatGLM3-6B examples on single Arc A770
+
+```bash
+bash qlora_finetune_chatglm3_6b_arc_1_card.sh
+```
+
+</details>
+
+<details>
+  <summary> Show Qwen-1.5-7B examples </summary>
+
+##### Finetuning Qwen-1.5-7B examples on single Arc A770
+
+Install transformers 4.37.0
+
+```bash
+pip install transformers==4.37.0
+```
+
+```bash
+bash qlora_finetune_qwen15_7b_arc_1_card.sh
+```
+
+</details>
+
+<details>
+  <summary> Show Baichuan2-7B examples </summary>
+
+##### Finetuning Baichuan2-7B examples on single Arc A770
+
+```bash
+bash qlora_finetune_baichuan2_7b_arc_1_card.sh
+```
+
+</details>
+
### 4. (Optional) Resume Training
If you fail to complete the whole finetuning process, it is suggested to resume training from a previously saved checkpoint by specifying `resume_from_checkpoint` to the local checkpoint folder as follows:
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
index 9156462a..ecd3d4da 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/alpaca_qlora_finetuning.py
@@ -173,6 +173,7 @@ def train(
bnb_4bit_compute_dtype=torch.bfloat16
)
model = AutoModelForCausalLM.from_pretrained(base_model,
+ torch_dtype=torch.bfloat16,
quantization_config=bnb_config,
trust_remote_code=True)
# below is also supported
@@ -191,12 +192,6 @@ def train(
tokenizer = AutoTokenizer.from_pretrained(base_model, trust_remote_code=True)
print(f"Tokenizer loaded on rank {os.environ.get('LOCAL_RANK')}")
-
- tokenizer.pad_token_id = (
- 0 # unk. we want this to be different from the eos token
- )
- tokenizer.padding_side = "left" # Allow batched inference
-
print(model)
# Prepare a IPEX-LLM compatible Peft model
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_baichuan2_7b_arc_1_card.sh b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_baichuan2_7b_arc_1_card.sh
new file mode 100644
index 00000000..3b66ac5c
--- /dev/null
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_baichuan2_7b_arc_1_card.sh
@@ -0,0 +1,21 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You could also specify `--base_model` to the local path of the huggingface model checkpoint folder and `--data_path` to the local path of the dataset JSON file
+python ./alpaca_qlora_finetuning.py \
+ --base_model "baichuan-inc/Baichuan2-7B-Chat" \
+ --data_path "yahma/alpaca-cleaned" \
+ --output_dir "./ipex-llm-qlora-alpaca"
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_chatglm3_6b_arc_1_card.sh b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_chatglm3_6b_arc_1_card.sh
new file mode 100644
index 00000000..6173b64d
--- /dev/null
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_chatglm3_6b_arc_1_card.sh
@@ -0,0 +1,22 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You could also specify `--base_model` to the local path of the huggingface model checkpoint folder and `--data_path` to the local path of the dataset JSON file
+python ./alpaca_qlora_finetuning.py \
+ --base_model "THUDM/chatglm3-6b" \
+ --data_path "yahma/alpaca-cleaned" \
+ --lora_target_modules '[query_key_value,dense,dense_h_to_4h,dense_4h_to_h]' \
+ --output_dir "./ipex-llm-qlora-alpaca"
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_qwen15_7b_arc_1_card.sh b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_qwen15_7b_arc_1_card.sh
new file mode 100644
index 00000000..3168bb8a
--- /dev/null
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_qwen15_7b_arc_1_card.sh
@@ -0,0 +1,21 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You could also specify `--base_model` to the local path of the huggingface model checkpoint folder and `--data_path` to the local path of the dataset JSON file
+python ./alpaca_qlora_finetuning.py \
+ --base_model "Qwen/Qwen1.5-7B-Chat" \
+ --data_path "yahma/alpaca-cleaned" \
+ --output_dir "./ipex-llm-qlora-alpaca"