From 1da1f1dd0edf95bca0ae393b5c2f23a73e6fba9a Mon Sep 17 00:00:00 2001
From: RyuKosei <70006706+RyuKosei@users.noreply.github.com>
Date: Mon, 29 Jul 2024 00:56:16 -0700
Subject: [PATCH] Combine two versions of run_wikitext.py (#11597)

* Combine two versions of run_wikitext.py

* Update run_wikitext.py

* Update run_wikitext.py

* aligned the format

* update error display

* simplified argument parser

---------

Co-authored-by: jenniew
---
 .../dev/benchmark/perplexity/run_wikitext.py | 31 +++++++++++++++----
 1 file changed, 25 insertions(+), 6 deletions(-)

diff --git a/python/llm/dev/benchmark/perplexity/run_wikitext.py b/python/llm/dev/benchmark/perplexity/run_wikitext.py
index 531ffff5..190d5114 100644
--- a/python/llm/dev/benchmark/perplexity/run_wikitext.py
+++ b/python/llm/dev/benchmark/perplexity/run_wikitext.py
@@ -20,16 +20,20 @@ import argparse
 import torch
 from tqdm import tqdm
+from datasets import concatenate_datasets, load_dataset
+from ipex_llm.utils.common import invalidInputError
 
 parser = argparse.ArgumentParser()
 parser.add_argument("--model_path", required=True, type=str)
-parser.add_argument("--data_path", type=str, default='wikitext-2-raw-v1/wikitext-2-raw/wiki.test.raw')
+parser.add_argument("--dataset", type=str, default=None)
+parser.add_argument("--data_path", type=str, default=None)
 parser.add_argument("--chunk_size", type=int, default=512)
 parser.add_argument("--stride", type=int, default=0)
 parser.add_argument("--device", type=str, default="xpu")
 parser.add_argument("--precision", type=str, default="sym_int4")
 parser.add_argument("--use-cache", action="store_true")
+parser.add_argument("--max_length", type=int, default=None)
 args = parser.parse_args()
 
 if args.precision == "fp16": # ipex fp16
@@ -46,14 +50,29 @@ else: # ipex-llm
     model = model.to(args.device)
 model = model.eval()
 
-with open(args.data_path, "rb") as f:
-    data = f.read()
-
 from transformers import AutoTokenizer
 tokenizer = AutoTokenizer.from_pretrained(args.model_path, trust_remote_code=True)
-encodings = tokenizer(data.decode("utf-8").strip("\n"), return_tensors="pt")
 
-max_length = model.config.max_position_embeddings
+if args.dataset:
+    def parse_kwargs(kwstr):
+        kvpair = [item.split('=') for item in kwstr.split(',') if item != ""]
+        return {k:v for k, v in kvpair}
+    test = load_dataset(**parse_kwargs(args.dataset), split="test")["text"]
+    encodings = tokenizer("\n\n".join(test), return_tensors="pt")
+elif args.data_path:
+    with open(args.data_path, "rb") as f:
+        data = f.read()
+    encodings = tokenizer(data.decode("utf-8").strip("\n"), return_tensors="pt")
+else:
+    raise invalidInputError(False, "Must specify either dataset or datapath.")
+
+if not args.max_length:
+    try:
+        max_length = model.config.max_position_embeddings
+    except:
+        max_length = model.config.seq_length # max_length in config of chatglm is 'seq_length'
+else:
+    max_length = args.max_length
 stride = args.chunk_size if args.stride <= 0 else args.stride
 seq_len = encodings.input_ids.size(1)
 num_chunks = seq_len // stride
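
For reference, the new --dataset flag takes a comma-separated key=value string that the parse_kwargs helper above expands into keyword arguments for datasets.load_dataset, while --data_path keeps the old behaviour of reading a raw text file. A minimal sketch of how that flag is interpreted, assuming the usual WikiText-2 identifiers on the Hugging Face Hub (the spec string below is an illustrative assumption, not something taken from the patch):

# Sketch only: mirrors the parse_kwargs helper introduced in this patch.
# The dataset spec is an assumed example; any keyword accepted by
# datasets.load_dataset can be passed the same way.
from datasets import load_dataset

def parse_kwargs(kwstr):
    # "path=wikitext,name=wikitext-2-raw-v1" -> {"path": "wikitext", "name": "wikitext-2-raw-v1"}
    kvpair = [item.split('=') for item in kwstr.split(',') if item != ""]
    return {k: v for k, v in kvpair}

kwargs = parse_kwargs("path=wikitext,name=wikitext-2-raw-v1")
test = load_dataset(**kwargs, split="test")["text"]  # list of text rows from the test split
print(len(test))

With the script itself, the same string would be passed on the command line, e.g. --dataset path=wikitext,name=wikitext-2-raw-v1, or the run can fall back to --data_path pointing at a local wiki.test.raw file as before.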