Fix LLAVA example on CPU (#11271)

* update

* update

* update

* update
Authored by Jiao Wang on 2024-06-25 20:04:59 -07:00; committed by GitHub
parent ca0e69c3a7
commit 40fa23560e
2 changed files with 6 additions and 8 deletions

Changed file 1 of 2 (the example's README):

@@ -20,13 +20,11 @@ conda activate llm

# install the latest ipex-llm nightly build with 'all' option
pip install --pre --upgrade ipex-llm[all] --extra-index-url https://download.pytorch.org/whl/cpu
pip install einops # install dependencies required by llava
pip install transformers==4.36.2

git clone https://github.com/haotian-liu/LLaVA.git # clone the llava library
cp generate.py ./LLaVA/ # copy our example to the LLaVA folder
cd LLaVA # change the working directory to the LLaVA folder
git checkout tags/v1.2.0 -b 1.2.0 # Get the branch which is compatible with transformers 4.36
pip install -e . # Install llava
cd ..
```

On Windows:
@@ -36,13 +34,12 @@ conda create -n llm python=3.11
conda activate llm

pip install --pre --upgrade ipex-llm[all]
pip install einops
pip install transformers==4.36.2

git clone https://github.com/haotian-liu/LLaVA.git
copy generate.py .\LLaVA\
cd LLaVA
git checkout tags/v1.2.0 -b 1.2.0
pip install -e .
cd ..
```

### 2. Run
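Usage note (a sketch, not part of this commit): the steps above copy the example into the cloned LLaVA checkout and change into that folder, so the script would be launched from there. The exact command-line flags are defined in generate.py and are not shown in this diff; assuming the script exposes a standard argparse CLI, its options can be listed like so:

```bash
# Illustrative only: run the copied example from inside the LLaVA checkout.
cd LLaVA
python ./generate.py --help   # prints the options the example actually accepts
```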

Changed file 2 of 2 (the example script, generate.py):

@@ -291,7 +291,8 @@ if __name__ == '__main__':
     # Load model
     tokenizer, model, image_processor, _ = load_pretrained_model(model_path=model_path,
                                                                  model_base=None,
-                                                                 model_name=model_name)
+                                                                 model_name=model_name,
+                                                                 device_map=None)

     # With only one line to enable IPEX-LLM optimization on model
     model = optimize_model(model)
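For context, a minimal sketch of the CPU loading path this change enables is shown below. It is not taken from the repository: the model path and name values are placeholders, and it assumes load_pretrained_model comes from the cloned LLaVA v1.2.0 checkout (llava.model.builder) and optimize_model from the ipex_llm package.

```python
# Sketch only: illustrates the load path fixed by this commit.
from llava.model.builder import load_pretrained_model   # from the cloned LLaVA repo
from ipex_llm import optimize_model                      # IPEX-LLM optimization entry point

model_path = "liuhaotian/llava-v1.5-7b"   # placeholder checkpoint id
model_name = "llava-v1.5-7b"              # placeholder; normally derived from model_path

# Passing device_map=None keeps the checkpoint on the default (CPU) device
# instead of the loader's default device map, which the commit title suggests
# did not work for this example on CPU.
tokenizer, model, image_processor, _ = load_pretrained_model(model_path=model_path,
                                                             model_base=None,
                                                             model_name=model_name,
                                                             device_map=None)

# One line to enable IPEX-LLM optimization on the CPU model.
model = optimize_model(model)
```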