From 0c498a7b6429a6003232c67bf1e2280ae9ef067d Mon Sep 17 00:00:00 2001
From: Yuwen Hu <54161268+Oscilloscope98@users.noreply.github.com>
Date: Wed, 17 Jan 2024 14:58:45 +0800
Subject: [PATCH] Add llama2-13b to igpu perf test (#9920)

---
 python/llm/test/benchmark/igpu-perf/1024-128.yaml | 1 +
 python/llm/test/benchmark/igpu-perf/2048-256.yaml | 1 +
 python/llm/test/benchmark/igpu-perf/32-32.yaml    | 1 +
 python/llm/test/benchmark/igpu-perf/32-512.yaml   | 1 +
 4 files changed, 4 insertions(+)

diff --git a/python/llm/test/benchmark/igpu-perf/1024-128.yaml b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
index 841cc6bc..55b3bb8e 100644
--- a/python/llm/test/benchmark/igpu-perf/1024-128.yaml
+++ b/python/llm/test/benchmark/igpu-perf/1024-128.yaml
@@ -7,6 +7,7 @@ repo_id:
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
   - 'WisdomShell/CodeShell-7B-Chat'
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'
diff --git a/python/llm/test/benchmark/igpu-perf/2048-256.yaml b/python/llm/test/benchmark/igpu-perf/2048-256.yaml
index 43948212..790530d3 100644
--- a/python/llm/test/benchmark/igpu-perf/2048-256.yaml
+++ b/python/llm/test/benchmark/igpu-perf/2048-256.yaml
@@ -7,6 +7,7 @@ repo_id:
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
   - 'WisdomShell/CodeShell-7B-Chat'
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'
diff --git a/python/llm/test/benchmark/igpu-perf/32-32.yaml b/python/llm/test/benchmark/igpu-perf/32-32.yaml
index 93a8a918..4a91fd43 100644
--- a/python/llm/test/benchmark/igpu-perf/32-32.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-32.yaml
@@ -7,6 +7,7 @@ repo_id:
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
   - 'WisdomShell/CodeShell-7B-Chat'
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'
diff --git a/python/llm/test/benchmark/igpu-perf/32-512.yaml b/python/llm/test/benchmark/igpu-perf/32-512.yaml
index 897b76a2..b18e1407 100644
--- a/python/llm/test/benchmark/igpu-perf/32-512.yaml
+++ b/python/llm/test/benchmark/igpu-perf/32-512.yaml
@@ -7,6 +7,7 @@ repo_id:
   - 'BAAI/AquilaChat2-7B'
   - '01-ai/Yi-6B'
   - 'meta-llama/Llama-2-7b-chat-hf'
+  - 'meta-llama/Llama-2-13b-chat-hf'
   - 'WisdomShell/CodeShell-7B-Chat'
   - 'tiiuae/falcon-7b-instruct-with-patch'
   - 'mosaicml/mpt-7b-chat'