[NPU GGUF] Add simple example (#12853)
parent 348dc8056d
commit 8077850452
4 changed files with 310 additions and 5 deletions
@@ -193,7 +193,8 @@ Refer to the following table for verified models:
| LLaMA 3.2 | [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) | Meteor Lake, Lunar Lake, Arrow Lake |
| DeepSeek-R1 | [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B), [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) | Meteor Lake, Lunar Lake, Arrow Lake |

### Setup for running llama.cpp
### Run GGUF model using CLI tool
#### Setup for running llama.cpp

First, you should create a directory to use `llama.cpp`. For instance, use the following command to create a `llama-cpp-npu` directory and enter it.
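A minimal sketch of this step (assuming a Windows Miniforge Prompt; the directory name follows the text above):

```cmd
mkdir llama-cpp-npu
cd llama-cpp-npu
```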
@@ -208,11 +209,11 @@ Then, please run the following command with **administrator privilege in Minifor
init-llama-cpp.bat
```

### Model Download
#### Model Download

Before running, you should download or copy a community GGUF model to your current directory. For instance, `DeepSeek-R1-Distill-Qwen-7B-Q6_K.gguf` of [DeepSeek-R1-Distill-Qwen-7B-GGUF](https://huggingface.co/lmstudio-community/DeepSeek-R1-Distill-Qwen-7B-GGUF/tree/main).
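One way to fetch that file from the command line (a sketch, assuming the `huggingface_hub` package and its `huggingface-cli` tool are installed in the active environment):

```cmd
pip install huggingface_hub
huggingface-cli download lmstudio-community/DeepSeek-R1-Distill-Qwen-7B-GGUF DeepSeek-R1-Distill-Qwen-7B-Q6_K.gguf --local-dir .
```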

### Run the quantized model
#### Run the quantized model

Please refer to [Runtime Configurations](#runtime-configurations) before running the following command in Miniforge Prompt.
@@ -220,11 +221,15 @@ Please refer to [Runtime Configurations](#runtime-configurations) before running
llama-cli-npu.exe -m DeepSeek-R1-Distill-Qwen-7B-Q6_K.gguf -n 32 --prompt "What is AI?"
```

You can use `llama-cli-npu.exe -h` for more details about the meaning of each parameter.

### Run GGUF model using llama.cpp C++ API

IPEX-LLM also supports the `llama.cpp` C++ API for running GGUF models on Intel NPU. Refer to [Simple Example](../../../python/llm/example/NPU/llama.cpp/) for detailed usage.
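For a quick preview, the example README added in this commit (see below) invokes the built binary like this:

```cmd
simple.exe -m <gguf_model_path> -n 64 -p "Once upon a time,"
```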

> **Note**:
>
> - **Warmup on first run**: When running specific GGUF models on NPU for the first time, you might notice delays up to several minutes before the first token is generated. This delay occurs because of the blob compilation.
> - For more details about the meaning of each parameter, you can use `llama-cli-npu.exe -h`.

## Accuracy Tuning
51 python/llm/example/NPU/llama.cpp/CMakeLists.txt Normal file
@@ -0,0 +1,51 @@
cmake_minimum_required(VERSION 3.10)

project(LLM_NPU_EXAMPLE VERSION 1.0.0 LANGUAGES CXX)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED True)

if(DEFINED ENV{CONDA_ENV_DIR})
    set(ENV_DIR $ENV{CONDA_ENV_DIR})
    set(LIBRARY_DIR ${ENV_DIR}/bigdl-core-npu)
    include_directories(${LIBRARY_DIR}/include/npu)
    include_directories(${LIBRARY_DIR}/include/llamacpp)
    set(DLL_DIR ${ENV_DIR}/intel_npu_acceleration_library/lib/Release)
else()
    set(LIBRARY_DIR ${CMAKE_CURRENT_SOURCE_DIR})
    include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
endif()

add_library(npu_llm STATIC IMPORTED)
set_target_properties(npu_llm PROPERTIES IMPORTED_LOCATION ${LIBRARY_DIR}/npu_llm.lib)

add_library(llama STATIC IMPORTED)
set_target_properties(llama PROPERTIES IMPORTED_LOCATION ${LIBRARY_DIR}/llama.lib)

add_library(common STATIC IMPORTED)
set_target_properties(common PROPERTIES IMPORTED_LOCATION ${LIBRARY_DIR}/common.lib)

add_library(ggml STATIC IMPORTED)
set_target_properties(ggml PROPERTIES IMPORTED_LOCATION ${LIBRARY_DIR}/ggml.lib)

set(TARGET simple)
add_executable(${TARGET} simple.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE npu_llm common llama ggml ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)

add_custom_command(TARGET simple POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy_if_different
            ${LIBRARY_DIR}/npu_llm.dll
            ${LIBRARY_DIR}/llama.dll
            ${LIBRARY_DIR}/ggml.dll
            ${CMAKE_BINARY_DIR}/Release/
    COMMENT "Copying npu_llm.dll llama.dll ggml.dll to build/Release\n"
)

add_custom_command(TARGET simple POST_BUILD
    COMMAND ${CMAKE_COMMAND} -E copy_directory
            ${DLL_DIR}/
            ${CMAKE_BINARY_DIR}/Release/
    COMMENT "Copying dependency to build/Release\n"
)
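The README that follows walks through building this example; as a condensed sketch of the invocation (assuming the `llm` conda environment from that README is active, so `%CONDA_PREFIX%` points at it; note that the second copy step above relies on `DLL_DIR`, which is only defined when `CONDA_ENV_DIR` is set):

```cmd
:: run from the directory containing CMakeLists.txt and simple.cpp
set CONDA_ENV_DIR=%CONDA_PREFIX%\Lib\site-packages
mkdir build
cd build
cmake ..
cmake --build . --config Release -j
```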
63 python/llm/example/NPU/llama.cpp/README.md Normal file
@@ -0,0 +1,63 @@
# (Experimental) Example of running GGUF model using llama.cpp C++ API on NPU
In this directory, you will find a simple C++ example of how to run GGUF models on Intel NPUs using the `llama.cpp` C++ API. See the table below for verified models.

## Verified Models

| Model | Model link |
|:--|:--|
| LLaMA 3.2 | [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) |
| DeepSeek-R1 | [deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B), [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) |

Please refer to [Quickstart](../../../../../docs/mddocs/Quickstart/npu_quickstart.md#experimental-llamacpp-support) for details about verified platforms.

## 0. Prerequisites
For `ipex-llm` NPU support, please refer to [Quickstart](../../../../../docs/mddocs/Quickstart/npu_quickstart.md#install-prerequisites) for details about the required preparations.

## 1. Install & Runtime Configurations
### 1.1 Installation on Windows
We suggest using conda to manage the environment:
```cmd
conda create -n llm python=3.11
conda activate llm

:: for building the example
pip install cmake

:: install ipex-llm with 'npu' option
pip install --pre --upgrade ipex-llm[npu]
```

Please refer to [Quickstart](../../../../../docs/mddocs/Quickstart/npu_quickstart.md#install-prerequisites) for more details about `ipex-llm` installation on Intel NPU.

### 1.2 Runtime Configurations
Please refer to [Quickstart](../../../../../docs/mddocs/Quickstart/npu_quickstart.md#runtime-configurations) for setting environment variables based on your device.

## 2. Build C++ Example `simple`

- You can run the CMake script below in cmd to build `simple` yourself; don't forget to replace `<CONDA_ENV_DIR>` below with your own path.

```cmd
:: under current directory
:: please replace below conda env dir with your own path
set CONDA_ENV_DIR=C:\Users\arda\miniforge3\envs\llm\Lib\site-packages
mkdir build
cd build
cmake ..
cmake --build . --config Release -j
cd Release
```

- You can also directly use our released `simple.exe`, which has the same usage as this example `simple.cpp`.

## 3. Run `simple`

With the built `simple`, you can run the GGUF model:

```cmd
:: Run simple text completion
simple.exe -m <gguf_model_path> -n 64 -p "Once upon a time,"
```

> **Note**:
>
> **Warmup on first run**: When running specific GGUF models on NPU for the first time, you might notice delays up to several minutes before the first token is generated. This delay occurs because of the blob compilation.
186 python/llm/example/NPU/llama.cpp/simple.cpp Normal file
@@ -0,0 +1,186 @@
//
// Copyright 2016 The BigDL Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file is copied from https://github.com/ggerganov/llama.cpp/blob/3f1ae2e32cde00c39b96be6d01c2997c29bae555/examples/simple/simple.cpp

#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"

#include <vector>

static void print_usage(int, char ** argv) {
    LOG("\nexample usage:\n");
    LOG("\n %s -m model.gguf -p \"Hello my name is\" -n 32\n", argv[0]);
    LOG("\n");
}

int main(int argc, char ** argv) {
    gpt_params params;

    params.prompt = "Hello my name is";
    params.n_predict = 32;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, print_usage)) {
        return 1;
    }

    gpt_init();

    // total length of the sequence including the prompt
    const int n_predict = params.n_predict;

    // init LLM

    llama_backend_init();
    llama_numa_init(params.numa);

    // initialize the model

    llama_model_params model_params = llama_model_params_from_gpt_params(params);

    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);

    if (model == NULL) {
        fprintf(stderr , "%s: error: unable to load model\n" , __func__);
        return 1;
    }

    // initialize the context

    llama_context_params ctx_params = llama_context_params_from_gpt_params(params);

    llama_context * ctx = llama_new_context_with_model(model, ctx_params);

    if (ctx == NULL) {
        fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__);
        return 1;
    }

    auto sparams = llama_sampler_chain_default_params();

    sparams.no_perf = false;

    llama_sampler * smpl = llama_sampler_chain_init(sparams);

    llama_sampler_chain_add(smpl, llama_sampler_init_greedy());

    // tokenize the prompt

    std::vector<llama_token> tokens_list;
    tokens_list = ::llama_tokenize(ctx, params.prompt, true);

    const int n_ctx = llama_n_ctx(ctx);
    const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size());

    LOG("\n");
    LOG_INF("%s: n_predict = %d, n_ctx = %d, n_kv_req = %d\n", __func__, n_predict, n_ctx, n_kv_req);

    // make sure the KV cache is big enough to hold all the prompt and generated tokens
    if (n_kv_req > n_ctx) {
        LOG_ERR("%s: error: n_kv_req > n_ctx, the required KV cache size is not big enough\n", __func__);
        LOG_ERR("%s: either reduce n_predict or increase n_ctx\n", __func__);
        return 1;
    }

    // print the prompt token-by-token

    LOG("\n");

    for (auto id : tokens_list) {
        LOG("%s", llama_token_to_piece(ctx, id).c_str());
    }

    // create a llama_batch with size 512
    // we use this object to submit token data for decoding

    llama_batch batch = llama_batch_init(512, 0, 1);

    // evaluate the initial prompt
    for (size_t i = 0; i < tokens_list.size(); i++) {
        llama_batch_add(batch, tokens_list[i], i, { 0 }, false);
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

    if (llama_decode(ctx, batch) != 0) {
        LOG("%s: llama_decode() failed\n", __func__);
        return 1;
    }

    // main loop

    int n_cur = batch.n_tokens;
    int n_decode = 0;

    const auto t_main_start = ggml_time_us();

    while (n_cur <= n_predict) {
        // sample the next token
        {
            const llama_token new_token_id = llama_sampler_sample(smpl, ctx, -1);

            // is it an end of generation?
            if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) {
                LOG("\n");

                break;
            }

            LOG("%s", llama_token_to_piece(ctx, new_token_id).c_str());
            fflush(stdout);

            // prepare the next batch
            llama_batch_clear(batch);

            // push this new token for next evaluation
            llama_batch_add(batch, new_token_id, n_cur, { 0 }, true);

            n_decode += 1;
        }

        n_cur += 1;

        // evaluate the current batch with the transformer model
        if (llama_decode(ctx, batch)) {
            LOG_ERR("%s : failed to eval, return code %d\n", __func__, 1);
            return 1;
        }
    }

    LOG("\n");

    const auto t_main_end = ggml_time_us();

    LOG_INF("%s: decoded %d tokens in %.2f s, speed: %.2f t/s\n",
            __func__, n_decode, (t_main_end - t_main_start) / 1000000.0f, n_decode / ((t_main_end - t_main_start) / 1000000.0f));

    LOG("\n");
    llama_perf_sampler_print(smpl);
    llama_perf_context_print(ctx);

    LOG("\n");

    llama_batch_free(batch);
    llama_sampler_free(smpl);
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    return 0;
}