add Stable diffusion examples (#12418)
* add openjourney example
* add timing
* add stable diffusion to model page
* 4.1 fix
* small fix
parent 54c62feb74
commit d2a37b6ab2

6 changed files with 96 additions and 8 deletions
README.md
@@ -330,6 +330,7 @@ Over 50 models have been optimized/verified on `ipex-llm`, including *LLaMA/LLaM
 | MiniCPM-V-2 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v-2) | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2) |
 | MiniCPM-Llama3-V-2_5 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-Llama3-V-2_5) |
 | MiniCPM-V-2_6 | [link](python/llm/example/CPU/HF-Transformers-AutoModels/Model/minicpm-v-2_6) | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2_6) |
+| StableDiffusion | | [link](python/llm/example/GPU/HuggingFace/Multimodal/StableDiffusion) |
 
 ## Get Support
 - Please report a bug or raise a feature request by opening a [Github Issue](https://github.com/intel-analytics/ipex-llm/issues)
README.zh-CN.md
@@ -329,6 +329,7 @@ See the demo of running [*Text-Generation-WebUI*](https://ipex-llm.readthedocs.i
 | MiniCPM-V-2 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2) |
 | MiniCPM-Llama3-V-2_5 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-Llama3-V-2_5) |
 | MiniCPM-V-2_6 | | [link](python/llm/example/GPU/HuggingFace/Multimodal/MiniCPM-V-2_6) |
+| StableDiffusion | | [link](python/llm/example/GPU/HuggingFace/Multimodal/StableDiffusion) |
 
 ## Official Support
 - If you run into a problem or want to request a new feature, please open a [Github Issue](https://github.com/intel-analytics/ipex-llm/issues) to let us know
python/llm/example/GPU/HuggingFace/Multimodal/StableDiffusion/README.md
@@ -88,8 +88,19 @@ set SYCL_CACHE_PERSISTENT=1
 > For the first time that each model runs on Intel iGPU/Intel Arc™ A300-Series or Pro A60, it may take several minutes to compile.
 
 ### 4. Examples
+#### 4.1 Openjourney Example
+This example shows how to run Openjourney inference on Intel GPU.
+```bash
+python ./openjourney.py
+```
+
-#### 4.1 StableDiffusion XL Example
+Arguments info:
+- `--repo-id-or-model-path REPO_ID_OR_MODEL_PATH`: argument defining the huggingface repo id for the Openjourney model (e.g. `prompthero/openjourney`) to be downloaded, or the path to the huggingface checkpoint folder. Defaults to `'prompthero/openjourney'`.
+- `--prompt PROMPT`: argument defining the prompt used for inference. Defaults to `'An astronaut in the forest, detailed, 8k'`.
+- `--save-path`: argument defining the path to save the generated figure. Defaults to `openjourney-gpu.png`.
+- `--num-steps`: argument defining the number of inference steps. Defaults to `20`.
+
+#### 4.2 StableDiffusion XL Example
 This example shows how to run StableDiffusion XL inference on Intel GPU.
 ```bash
 python ./sdxl.py
 ```
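For reference, the new Openjourney example can also be launched with every documented argument spelled out; the invocation below simply restates the defaults listed in the arguments section above:

```bash
python ./openjourney.py \
    --repo-id-or-model-path prompthero/openjourney \
    --prompt "An astronaut in the forest, detailed, 8k" \
    --save-path openjourney-gpu.png \
    --num-steps 20
```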
@@ -105,7 +116,7 @@ Arguments info:
 The sample output image is shown below.
 
 
 
-#### 4.2 LCM-LoRA Example
+#### 4.3 LCM-LoRA Example
 The example shows how to perform inference with LCM-LoRA on Intel GPU.
 ```bash
 python ./lora-lcm.py
python/llm/example/GPU/HuggingFace/Multimodal/StableDiffusion/lora-lcm.py
@@ -19,6 +19,7 @@ import torch
 from diffusers import DiffusionPipeline, LCMScheduler
 import ipex_llm
 import argparse
+import time
 
 
 def main(args):
@@ -34,10 +35,21 @@ def main(args):
     pipe.load_lora_weights(args.lora_weights_path)
 
     generator = torch.manual_seed(42)
-    image = pipe(
-        prompt=args.prompt, num_inference_steps=args.num_steps, generator=generator, guidance_scale=1.0
-    ).images[0]
-    image.save(args.save_path)
+    with torch.inference_mode():
+        # warmup
+        image = pipe(
+            prompt=args.prompt, num_inference_steps=args.num_steps, generator=generator, guidance_scale=1.0
+        ).images[0]
+
+        # start inference
+        st = time.time()
+        image = pipe(
+            prompt=args.prompt, num_inference_steps=args.num_steps, generator=generator, guidance_scale=1.0
+        ).images[0]
+        end = time.time()
+        print(f'Inference time: {end-st} s')
+        image.save(args.save_path)
 
 if __name__=="__main__":
     parser = argparse.ArgumentParser(description="Stable Diffusion lora-lcm")
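This hunk and the sdxl.py change below apply the same warmup-then-measure pattern: one untimed pass absorbs the one-time kernel compilation mentioned in the README note above, and a second pass is timed. As a general illustration only (not part of this commit), the pattern could be factored into a helper; `timed_generate` is a hypothetical name:

```python
import time

def timed_generate(pipeline_call, warmup_runs=1):
    # Hypothetical helper, not in this PR: run the pipeline untimed first so
    # first-run kernel compilation does not skew the measurement, then time
    # a steady-state run, mirroring the pattern in lora-lcm.py and sdxl.py.
    for _ in range(warmup_runs):
        pipeline_call()
    st = time.time()
    result = pipeline_call()
    end = time.time()
    print(f'Inference time: {end-st} s')
    return result

# e.g. image = timed_generate(lambda: pipe(prompt=args.prompt,
#                                          num_inference_steps=args.num_steps,
#                                          generator=generator,
#                                          guidance_scale=1.0).images[0])
```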
python/llm/example/GPU/HuggingFace/Multimodal/StableDiffusion/openjourney.py (new file)
@@ -0,0 +1,54 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Code is adapted from https://huggingface.co/prompthero/openjourney
+
+from diffusers import StableDiffusionPipeline
+import torch
+import ipex_llm
+import argparse
+import time
+
+
+def main(args):
+    pipe = StableDiffusionPipeline.from_pretrained(
+        args.repo_id_or_model_path,
+        torch_dtype=torch.float16,
+        use_safetensors=True)
+    pipe = pipe.to("xpu")
+
+    with torch.inference_mode():
+        # warmup
+        image = pipe(args.prompt, num_inference_steps=args.num_steps).images[0]
+
+        # start inference
+        st = time.time()
+        image = pipe(args.prompt, num_inference_steps=args.num_steps).images[0]
+        end = time.time()
+        print(f'Inference time: {end-st} s')
+        image.save(args.save_path)
+
+if __name__=="__main__":
+    parser = argparse.ArgumentParser(description="Stable Diffusion")
+    parser.add_argument('--repo-id-or-model-path', type=str, default="prompthero/openjourney",
+                        help='The huggingface repo id for the stable diffusion model checkpoint')
+    parser.add_argument('--prompt', type=str, default="An astronaut in the forest, detailed, 8k",
+                        help='Prompt to infer')
+    parser.add_argument('--save-path', type=str, default="openjourney-gpu.png",
+                        help="Path to save the generated figure")
+    parser.add_argument('--num-steps', type=int, default=20,
+                        help="Number of inference steps")
+    args = parser.parse_args()
+    main(args)
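Because `--repo-id-or-model-path` accepts any Stable Diffusion checkpoint, the new script is not tied to Openjourney. As an illustration that this commit does not itself test, a different SD v1.x checkpoint (`runwayml/stable-diffusion-v1-5` is an assumed substitute here) should run the same way:

```bash
python ./openjourney.py \
    --repo-id-or-model-path runwayml/stable-diffusion-v1-5 \
    --save-path sd-v1-5-gpu.png
```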
python/llm/example/GPU/HuggingFace/Multimodal/StableDiffusion/sdxl.py
@@ -21,6 +21,7 @@ import ipex_llm
 import numpy as np
 from PIL import Image
 import argparse
+import time
 
 
 def main(args):
@@ -30,8 +31,16 @@ def main(args):
         use_safetensors=True
     ).to("xpu")
 
-    image = pipeline_text2image(prompt=args.prompt, num_inference_steps=args.num_steps).images[0]
-    image.save(args.save_path)
+    with torch.inference_mode():
+        # warmup
+        image = pipeline_text2image(prompt=args.prompt, num_inference_steps=args.num_steps).images[0]
+
+        # start inference
+        st = time.time()
+        image = pipeline_text2image(prompt=args.prompt, num_inference_steps=args.num_steps).images[0]
+        end = time.time()
+        print(f'Inference time: {end-st} s')
+        image.save(args.save_path)
 
 if __name__=="__main__":
     parser = argparse.ArgumentParser(description="Stable Diffusion")
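The hunk shows only the tail of the pipeline construction in sdxl.py. Judging from the visible `use_safetensors=True` and `.to("xpu")` lines and the `pipeline_text2image` name that diffusers' SDXL documentation uses, the construction presumably resembles the sketch below; the checkpoint id and `torch_dtype` are assumptions, not confirmed by the diff:

```python
import torch
import ipex_llm  # importing ipex_llm enables the Intel GPU ("xpu") backend
from diffusers import AutoPipelineForText2Image

# Assumed construction; only the last two lines appear in the hunk above.
pipeline_text2image = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed checkpoint id
    torch_dtype=torch.float16,                   # assumed dtype
    use_safetensors=True
).to("xpu")
```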