ipex-llm/python/llm/test/benchmark/igpu-perf
File                           Last commit                                                                Date
32-32.yaml                     add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
32-32_437.yaml                 add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
1024-128.yaml                  add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
1024-128_437.yaml              add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
1024-128_int4_fp16.yaml        Update tests for transformers 4.36 (#10858)                                2024-05-24 10:26:38 +08:00
1024-128_int4_fp16_437.yaml    Add Meta-llama-3-8B-Instruct and Yi-6B-Chat to igpu nightly perf (#10810)  2024-04-19 15:09:58 +08:00
1024-128_loadlowbit.yaml       add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
1024-128_loadlowbit_437.yaml   add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
2048-256.yaml                  add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
2048-256_437.yaml              add glm4 and qwen2 to igpu perf (#11304)                                   2024-06-13 16:16:35 +08:00
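
Each YAML file above drives one iGPU performance benchmark run; the file name encodes the input/output token lengths tested (for example, 1024-128 covers 1024 input tokens and 128 generated tokens), and suffixes such as _437, _int4_fp16, and _loadlowbit appear to select variants of the model list or loading path. Below is a minimal sketch of what one of these configs might contain, assuming the schema of ipex-llm's all-in-one benchmark config (field names, model repo ids, and the test_api value are assumptions for illustration, not taken from this listing; check the actual files for the exact keys):

  # Hypothetical sketch of an igpu-perf config such as 32-32.yaml (assumed schema).
  repo_id:
    - 'THUDM/glm-4-9b-chat'        # example models; glm4 and qwen2 are mentioned in the commits above
    - 'Qwen/Qwen2-7B-Instruct'
  local_model_hub: 'path/to/local/model/hub'
  warm_up: 1                       # warm-up iterations excluded from reported numbers
  num_trials: 3                    # measured iterations
  num_beams: 1                     # greedy search
  low_bit: 'sym_int4'              # quantization format used when loading the model
  batch_size: 1
  in_out_pairs:
    - '32-32'                      # 32 input tokens, 32 output tokens (matches the file name)
  test_api:
    - 'transformer_int4_gpu_win'   # assumed iGPU (Windows) test API name
  cpu_embedding: True              # run the embedding layer on CPU to reduce iGPU memory use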