From 48b85593b3703a0ff537fdab7623f54cf7dbefb6 Mon Sep 17 00:00:00 2001 From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com> Date: Thu, 7 Dec 2023 10:32:09 +0800 Subject: [PATCH] Update all-in-one benchmark readme (#9618) --- python/llm/dev/benchmark/all-in-one/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/python/llm/dev/benchmark/all-in-one/README.md b/python/llm/dev/benchmark/all-in-one/README.md index 06d0b5c3..35734ad4 100644 --- a/python/llm/dev/benchmark/all-in-one/README.md +++ b/python/llm/dev/benchmark/all-in-one/README.md @@ -21,10 +21,12 @@ repo_id: - 'THUDM/chatglm-6b' - 'THUDM/chatglm2-6b' - 'meta-llama/Llama-2-7b-chat-hf' + # - 'liuhaotian/llava-v1.5-7b' # requires a LLAVA_REPO_DIR env variable pointing to the llava dir; added only for gpu win related test_api now local_model_hub: 'path to your local model hub' warm_up: 1 num_trials: 3 num_beams: 1 # default to greedy search +low_bit: 'sym_int4' # default to use 'sym_int4' (i.e. symmetric int4) in_out_pairs: - '32-32' - '1024-128' @@ -36,6 +38,9 @@ test_api: # - "ipex_fp16_gpu" # on Intel GPU # - "transformer_int4_gpu" # on Intel GPU # - "optimize_model_gpu" # on Intel GPU + # - "deepspeed_transformer_int4_cpu" # on Intel SPR Server + # - "transformer_int4_gpu_win" # on Intel GPU for Windows (catch GPU peak memory) +cpu_embedding: False # whether to put embedding to CPU (only available now for gpu win related test_api) ``` ## Run