From f07937945f8eb8e56e4f310ce83382137642aa94 Mon Sep 17 00:00:00 2001
From: Jun Wang
Date: Thu, 4 Jul 2024 16:38:34 +0800
Subject: [PATCH] [REMOVE] remove all useless repo-id in benchmark/igpu-perf
 (#11508)

---
 python/llm/test/benchmark/igpu-perf/1024-128.yaml      | 11 -----------
 python/llm/test/benchmark/igpu-perf/1024-128_437.yaml  |  2 --
 .../test/benchmark/igpu-perf/1024-128_int4_fp16.yaml   |  9 ---------
 .../benchmark/igpu-perf/1024-128_int4_fp16_437.yaml    |  1 -
 .../test/benchmark/igpu-perf/1024-128_loadlowbit.yaml  | 11 -----------
 .../benchmark/igpu-perf/1024-128_loadlowbit_437.yaml   |  2 --
 python/llm/test/benchmark/igpu-perf/2048-256.yaml      | 11 -----------
 python/llm/test/benchmark/igpu-perf/2048-256_437.yaml  |  2 --
 python/llm/test/benchmark/igpu-perf/32-32.yaml         | 11 -----------
 python/llm/test/benchmark/igpu-perf/32-32_437.yaml     |  2 --
 10 files changed, 62 deletions(-)

diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
index 21757587..024a16d4 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -1,23 +1,12 @@
 repo_id:
   - 'THUDM/chatglm3-6b'
-  - 'THUDM/chatglm2-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'internlm/internlm-chat-7b'
-  - 'Qwen/Qwen-7B-Chat'
-  - 'BAAI/AquilaChat2-7B'
 # - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
   - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'WisdomShell/CodeShell-7B-Chat'
-  - 'tiiuae/falcon-7b-instruct-with-patch'
-  - 'mosaicml/mpt-7b-chat'
 # - 'liuhaotian/llava-v1.5-7b' # Cannot load using AutoModelForCausalLM in 4.36+
-  - 'RWKV/rwkv-4-world-7b'
-  - 'RWKV/rwkv-5-world-7b'
-  - 'IEITYuan/Yuan2-2B-hf'
-  - 'mistralai/Mistral-7B-Instruct-v0.1'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
index 16189637..2257fd1f 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_437.yaml
@@ -2,8 +2,6 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - '01-ai/Yi-6B-Chat'
-  - 'microsoft/phi-2'
   - 'microsoft/Phi-3-mini-4k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
index a073c5cb..527cefc9 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16.yaml
@@ -1,22 +1,13 @@
 repo_id:
   - 'THUDM/chatglm3-6b'
-  - 'THUDM/chatglm2-6b'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'internlm/internlm-chat-7b'
-  - 'Qwen/Qwen-7B-Chat'
-  - 'BAAI/AquilaChat2-7B'
 # - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
   - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'WisdomShell/CodeShell-7B-Chat'
-  - 'tiiuae/falcon-7b-instruct-with-patch'
-  - 'mosaicml/mpt-7b-chat'
 # - 'liuhaotian/llava-v1.5-7b' # Cannot load using AutoModelForCausalLM in 4.36+
 # - 'RWKV/rwkv-4-world-7b'
 # - 'RWKV/rwkv-5-world-7b'
-  - 'IEITYuan/Yuan2-2B-hf'
-  - 'mistralai/Mistral-7B-Instruct-v0.1'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
index 5051de78..b31716ba 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_int4_fp16_437.yaml
@@ -1,7 +1,6 @@
 repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - '01-ai/Yi-6B-Chat'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit.yaml
index 12d5fa7e..ffbaaa20 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit.yaml
@@ -1,23 +1,12 @@
 repo_id:
   - 'THUDM/chatglm3-6b'
-  - 'THUDM/chatglm2-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'internlm/internlm-chat-7b'
-  - 'Qwen/Qwen-7B-Chat'
-  - 'BAAI/AquilaChat2-7B'
 # - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
   - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'WisdomShell/CodeShell-7B-Chat'
-  - 'tiiuae/falcon-7b-instruct-with-patch'
-  - 'mosaicml/mpt-7b-chat'
 # - 'liuhaotian/llava-v1.5-7b' # Cannot load using AutoModelForCausalLM in 4.36+
-  - 'RWKV/rwkv-4-world-7b'
-  - 'RWKV/rwkv-5-world-7b'
-  - 'IEITYuan/Yuan2-2B-hf'
-  - 'mistralai/Mistral-7B-Instruct-v0.1'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit_437.yaml b/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit_437.yaml
index b9a29ddd..9286f84d 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128_loadlowbit_437.yaml
@@ -2,8 +2,6 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - '01-ai/Yi-6B-Chat'
-  - 'microsoft/phi-2'
   - 'microsoft/Phi-3-mini-4k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256.yaml b/python/llm/test/benchmark/igpu-perf/2048-256.yaml
index b995c294..aaf1d326 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256.yaml
@@ -1,23 +1,12 @@
 repo_id:
   - 'THUDM/chatglm3-6b'
-  - 'THUDM/chatglm2-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'internlm/internlm-chat-7b'
-  - 'Qwen/Qwen-7B-Chat'
-  - 'BAAI/AquilaChat2-7B'
 # - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
   - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'WisdomShell/CodeShell-7B-Chat'
-  - 'tiiuae/falcon-7b-instruct-with-patch'
-  - 'mosaicml/mpt-7b-chat'
 # - 'liuhaotian/llava-v1.5-7b' # Cannot load using AutoModelForCausalLM in 4.36+
-  - 'RWKV/rwkv-4-world-7b'
-  - 'RWKV/rwkv-5-world-7b'
-  - 'IEITYuan/Yuan2-2B-hf'
-  - 'mistralai/Mistral-7B-Instruct-v0.1'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
 num_trials: 3
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml b/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml
index c79f6616..264d6015 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256_437.yaml
@@ -2,8 +2,6 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - '01-ai/Yi-6B-Chat'
-  - 'microsoft/phi-2'
   - 'microsoft/Phi-3-mini-4k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 1
diff --git a/python/llm/test/benchmark/igpu-perf/32-32.yaml b/python/llm/test/benchmark/igpu-perf/32-32.yaml
index 9ac123ec..f91210f6 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32.yaml
@@ -1,23 +1,12 @@
 repo_id:
   - 'THUDM/chatglm3-6b'
-  - 'THUDM/chatglm2-6b'
   - 'THUDM/glm-4-9b-chat'
   - 'baichuan-inc/Baichuan2-7B-Chat'
   - 'baichuan-inc/Baichuan2-13B-Chat'
-  - 'internlm/internlm-chat-7b'
-  - 'Qwen/Qwen-7B-Chat'
-  - 'BAAI/AquilaChat2-7B'
 # - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
   - 'meta-llama/Llama-2-13b-chat-hf'
-  - 'WisdomShell/CodeShell-7B-Chat'
-  - 'tiiuae/falcon-7b-instruct-with-patch'
-  - 'mosaicml/mpt-7b-chat'
 # - 'liuhaotian/llava-v1.5-7b' # Cannot load using AutoModelForCausalLM in 4.36+
-  - 'RWKV/rwkv-4-world-7b'
-  - 'RWKV/rwkv-5-world-7b'
-  - 'IEITYuan/Yuan2-2B-hf'
-  - 'mistralai/Mistral-7B-Instruct-v0.1'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
 num_trials: 5
diff --git a/python/llm/test/benchmark/igpu-perf/32-32_437.yaml b/python/llm/test/benchmark/igpu-perf/32-32_437.yaml
index 13e70940..9548c4a6 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32_437.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32_437.yaml
@@ -2,8 +2,6 @@ repo_id:
   - 'Qwen/Qwen1.5-7B-Chat'
   - 'Qwen/Qwen2-7B-Instruct'
   - 'meta-llama/Meta-Llama-3-8B-Instruct'
-  - '01-ai/Yi-6B-Chat'
-  - 'microsoft/phi-2'
   - 'microsoft/Phi-3-mini-4k-instruct'
 local_model_hub: 'path to your local model hub'
 warm_up: 3
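
Note: each of these YAML files describes one iGPU benchmark configuration: repo_id lists the Hugging Face model IDs to benchmark, local_model_hub points at the directory holding local copies of those models, and warm_up/num_trials set the number of untimed warm-up runs and timed trials. As a rough illustration only, a minimal sketch of how such a file could be consumed, assuming PyYAML is available; run_benchmark below is a hypothetical stand-in, not the project's actual runner, and only the YAML keys come from this patch:

import yaml  # assumes PyYAML (pip install pyyaml)

def run_benchmark(repo_id: str, model_hub: str, warm_up: int, num_trials: int) -> None:
    # Hypothetical placeholder: a real runner would load the model from
    # model_hub, do `warm_up` untimed generations, then time `num_trials`.
    print(f"benchmarking {repo_id} (warm_up={warm_up}, trials={num_trials})")

with open("python/llm/test/benchmark/igpu-perf/1024-128.yaml") as f:
    conf = yaml.safe_load(f)

for repo_id in conf["repo_id"]:
    run_benchmark(repo_id, conf["local_model_hub"],
                  conf["warm_up"], conf["num_trials"])

Trimming repo_id therefore shrinks only the set of models iterated over; the remaining keys, and the runner itself, are untouched by this patch.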