From 2b9c7d2a596108cf7302adc6d9e9a1df0a4ea20f Mon Sep 17 00:00:00 2001
From: binbin Deng <108676127+plusbang@users.noreply.github.com>
Date: Mon, 27 Nov 2023 11:04:27 +0800
Subject: [PATCH] LLM: quick fix alpaca qlora finetuning script (#9534)

---
 .../alpaca-qlora/finetune_llama2_7b_arc_2_card.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/finetune_llama2_7b_arc_2_card.sh b/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/finetune_llama2_7b_arc_2_card.sh
index ccb30e42..4a3667c7 100644
--- a/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/finetune_llama2_7b_arc_2_card.sh
+++ b/python/llm/example/GPU/QLoRA-FineTuning/alpaca-qlora/finetune_llama2_7b_arc_2_card.sh
@@ -21,6 +21,6 @@ export CCL_ATL_TRANSPORT=ofi
 
 mpirun -n 2 \
        python -u ./alpaca_qlora_finetuning.py \
-       --base_model /mnt/disk1/models/Llama-2-7b-hf \
-       --data_path '/home/arda/binbin/dataset/alpaca-cleaned/alpaca_data_cleaned.json' \
+       --base_model "meta-llama/Llama-2-7b-hf" \
+       --data_path "yahma/alpaca-cleaned" \
        --output_dir "./bigdl-qlora-alpaca" > training.log
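
Note (not part of the patch): a minimal Python sketch of how the Hugging Face Hub
identifiers substituted above would typically resolve, replacing the old local
paths. It assumes the `transformers` and `datasets` packages are installed and
that access to the gated meta-llama/Llama-2-7b-hf repository has been granted
(for example via `huggingface-cli login`).

    # Sketch only: verify the Hub identifiers used in the updated script resolve.
    from datasets import load_dataset
    from transformers import AutoTokenizer

    # Downloads/caches the tokenizer for the base model named in --base_model.
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")

    # Downloads/caches the dataset named in --data_path.
    data = load_dataset("yahma/alpaca-cleaned")
    print(data["train"][0])  # each record carries instruction / input / output fields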