diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
index 621c478e..c859ca5a 100644
--- a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/README.md
@@ -10,6 +10,8 @@ To run this example with IPEX-LLM on Intel GPUs, we have some recommended requir
### 1. Install
+For Gemma-2B, skip this section and start directly from [here](README.md#3-qlora-finetune), which has its own installation steps.
+
```bash
conda create -n llm python=3.11
conda activate llm
@@ -171,6 +173,35 @@ bash qlora_finetune_qwen15_7b_arc_1_card.sh
+
+ Show Gemma-2B example
+
+##### 1. Install
+
+```bash
+conda create -n llm python=3.11
+conda activate llm
+# below command will install intel_extension_for_pytorch==2.1.10+xpu as default
+pip install --pre --upgrade ipex-llm[xpu] --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/xpu/us/
+pip install datasets
+pip install transformers==4.38.1
+pip install accelerate==0.27.2
+pip install bitsandbytes==0.45.3
+```
+##### 2. Configures OneAPI environment variables
+
+```bash
+source /opt/intel/oneapi/setvars.sh
+```
+
+##### 3. Run on A770
+
+```bash
+bash qlora_finetune_gemma_2b_arc_1_card.sh
+```
+
+
+
Show Baichuan2-7B examples
diff --git a/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_gemma_2b_arc_1_card.sh b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_gemma_2b_arc_1_card.sh
new file mode 100644
index 00000000..95d4be2f
--- /dev/null
+++ b/python/llm/example/GPU/LLM-Finetuning/QLoRA/alpaca-qlora/qlora_finetune_gemma_2b_arc_1_card.sh
@@ -0,0 +1,21 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# You could also specify `--base_model` to the local path of the huggingface model checkpoint folder and `--data_path` to the local path of the dataset JSON file
+python ./alpaca_qlora_finetuning.py \
+ --base_model "google/gemma-2b-it" \
+ --data_path "yahma/alpaca-cleaned" \
+ --output_dir "./ipex-llm-qlora-alpaca"