Add html report of ppl (#10218)
* remove include and language option, select the corresponding dataset based on the model name in run.py
* change the nightly test time
* change the nightly test time of harness and ppl
* save the ppl result to a json file
* generate csv file and print table result
* generate html
* modify the way to get the parent folder
* update html in parent folder
* add llm-ppl-summary and llm-ppl-summary-html
* modify echo single result
* remove download fp16.csv
* change model name of PR
* move ppl nightly related files to llm/test folder
* reformat
* separate make_table from make_table_and_csv.py
* separate make_csv from make_table_and_csv.py
* update llm-ppl-html
* remove comment
* add Download fp16.results
parent 6d60982746
commit cba61a2909
8 changed files with 588 additions and 3 deletions
.github/workflows/llm-ppl-evaluation.yml (104 changed lines, vendored)
@@ -174,4 +174,106 @@ jobs:
           --precisions ${{ matrix.precision }} \
           --device ${{ matrix.device }} \
           --dataset_path ${DATASET_DIR} \
-          --language ${LANGUAGE}
+          --language ${LANGUAGE} \
+          --output_path results
+
+      - uses: actions/upload-artifact@v3
+        with:
+          name: ppl_results
+          path: ${{ github.workspace }}/python/llm/dev/benchmark/perplexity/results/**
+
+      - name: echo single result
+        shell: bash
+        working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/perplexity/results/
+        run: |
+          if [[ "${{ matrix.model_name }}" == *"chatglm"* || "${{ matrix.model_name }}" == *"Baichuan"* ]]; then
+            LANGUAGE="zh"
+          else
+            LANGUAGE="en"
+          fi
+          cat ${{ matrix.model_name }}/${{ matrix.device }}/${{ matrix.precision }}/${LANGUAGE}/result.json
+
+  llm-ppl-summary:
+    if: ${{ always() }}
+    needs: llm-ppl-evaluation
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
+      - name: Install dependencies
+        shell: bash
+        run: |
+          pip install --upgrade pip
+          pip install jsonlines pytablewriter regex
+      - name: Download all results
+        uses: actions/download-artifact@v3
+        with:
+          name: ppl_results
+          path: results
+      - name: Summarize the results
+        shell: bash
+        run: |
+          ls results
+          python ${{ github.workspace }}/python/llm/dev/benchmark/perplexity/make_table.py results
+
+  llm-ppl-html:
+    if: ${{ github.event_name == 'schedule' || github.event_name == 'pull_request' }}
+    needs: [llm-ppl-evaluation]
+    runs-on: ["self-hosted", "llm", "accuracy1", "accuracy-nightly"]
+    steps:
+      - uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # actions/checkout@v3
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
+      - name: Install dependencies
+        shell: bash
+        run: |
+          pip install --upgrade pip
+          pip install jsonlines pytablewriter regex
+          pip install pandas==1.5.3
+
+      - name: Set output path
+        shell: bash
+        run: |
+          echo "DATE=$(date +%Y-%m-%d)" >> $GITHUB_ENV
+          if ${{ github.event_name == 'pull_request' }}; then
+            echo 'ACC_FOLDER=/home/arda/ppl-action-runners/pr-accuracy-data' >> $GITHUB_ENV
+          fi
+          if ${{ github.event_name == 'schedule' }}; then
+            echo 'ACC_FOLDER=/home/arda/ppl-action-runners/nightly-accuracy-data' >> $GITHUB_ENV
+          fi
+
+      - name: Download ppl results
+        uses: actions/download-artifact@v3
+        with:
+          name: ppl_results
+          path: ${{ env.ACC_FOLDER }}/${{ env.DATE }}
+
+      # Save fp16.csv in the parent folder of $ACC_FOLDER
+      - name: Download fp16.results
+        shell: bash
+        run: |
+          wget https://raw.githubusercontent.com/intel-analytics/BigDL/main/python/llm/test/benchmark/perplexity/fp16.csv -O $ACC_FOLDER/../fp16.csv
+          ls $ACC_FOLDER/..
+
+      - name: Write to CSV
+        working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/perplexity
+        shell: bash
+        run: |
+          ls $ACC_FOLDER/$DATE
+          python make_csv.py $ACC_FOLDER/$DATE $ACC_FOLDER
+
+      - name: Update HTML
+        working-directory: ${{ github.workspace }}/python/llm/test/benchmark/perplexity
+        shell: bash
+        run: |
+          python ppl_csv_to_html.py -f $ACC_FOLDER
+          if ${{ github.event_name == 'schedule' }}; then
+            python update_html_in_parent_folder.py -f $ACC_FOLDER
+          fi
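Taken together, the new jobs form a small pipeline: each evaluation run uploads `results/<model>/<device>/<precision>/<language>/result.json` as the `ppl_results` artifact, `llm-ppl-summary` prints a Markdown table, and `llm-ppl-html` merges everything into a dated CSV and renders HTML. A rough local replay of the summary/HTML steps, for orientation only (the scripts live in `python/llm/dev/benchmark/perplexity` and `python/llm/test/benchmark/perplexity`; the paths below are placeholders, not paths the workflow uses):

```python
# Hypothetical local replay of the summary/HTML chain; all paths are placeholders.
import subprocess

results = "results"               # output of run.py --output_path results
acc = "accuracy-data/2024-01-01"  # stands in for $ACC_FOLDER/$DATE

subprocess.run(["python", "make_table.py", results], check=True)  # Markdown summary
subprocess.run(["python", "make_csv.py", acc, "accuracy-data"], check=True)
# ppl_csv_to_html.py expects fp16.csv in the parent of the folder passed via -f
subprocess.run(["python", "ppl_csv_to_html.py", "-f", "accuracy-data"], check=True)
```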
python/llm/dev/benchmark/perplexity/README.md

@@ -18,3 +18,8 @@ python run.py --model_path meta-llama/Llama-2-7b-chat-hf --precisions float16 sy
 ```
 - The `language` argument only takes effect when `datasets` is `None`. Its choices are `en`, `zh`, and `all`, which stand for the English datasets, the Chinese datasets, and all datasets, respectively.
 - If you want to test perplexity on pre-downloaded datasets, specify `<path/to/dataset>` in the `dataset_path` argument of your command.
+
+## Summarize the results
+```bash
+python make_table.py <input_dir>
+```
python/llm/dev/benchmark/perplexity/make_csv.py (98 lines, new file)
@@ -0,0 +1,98 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Usage:
+   python make_csv.py <input_dir> <output_dir>
+"""
+
+import logging
+from pytablewriter import MarkdownTableWriter, LatexTableWriter
+import os
+import json
+import sys
+import csv
+import datetime
+
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+def make_csv(result_dict, output_path=None):
+    current_date = datetime.datetime.now().strftime("%Y-%m-%d")
+    file_name = f'results_{current_date}.csv'
+    full_path = os.path.join(output_path, file_name) if output_path else file_name
+    print('Writing to', full_path)
+    file_name = full_path
+    headers = ["Index", "Model", "Precision", "en", "zh"]
+
+    with open(file_name, mode='w', newline='') as csv_file:
+        writer = csv.writer(csv_file)
+        writer.writerow(headers)
+        index = 0
+        for model, model_results in result_dict.items():
+            for precision, prec_results in model_results.items():
+                row = [index, model, precision]
+                for language in headers[3:]:
+                    task_results = prec_results.get(language.lower(), None)
+                    if task_results is None:
+                        row.append("")
+                    else:
+                        result = task_results["results"]
+                        row.append("%.4f" % result)
+                writer.writerow(row)
+                index += 1
+
+
+def merge_results(path):
+    # loop dirs and subdirs in results dir
+    # for each dir, load json files
+    print('Read from', path)
+    merged_results = dict()
+    for dirpath, dirnames, filenames in os.walk(path):
+        # skip dirs without files
+        if not filenames:
+            continue
+        for filename in sorted([f for f in filenames if f.endswith("result.json")]):
+            path = os.path.join(dirpath, filename)
+            # the directory layout is <model>/<device>/<precision>/<language>/result.json
+            model, device, precision, language = dirpath.split('/')[-4:]
+            with open(path, "r") as f:
+                result_dict = json.load(f)
+            if model not in merged_results:
+                merged_results[model] = dict()
+            if precision not in merged_results[model]:
+                merged_results[model][precision] = dict()
+            merged_results[model][precision][language] = result_dict
+    return merged_results
+
+
+def main(*args):
+    assert len(args) > 2, \
+        """Usage:
+           python make_csv.py <input_dir> <output_dir>
+        """
+
+    input_path = args[1]
+    output_path = args[2]
+
+    merged_results = merge_results(input_path)
+    make_csv(merged_results, output_path)
+
+
+if __name__ == "__main__":
+    # when running from the harness, the first argument is the script name;
+    # the second and third arguments are the input_dir and output_dir
+    main(*sys.argv)
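`merge_results` derives `(model, device, precision, language)` from the last four path components of each directory holding a `result.json`, so the input must follow the layout `run.py` produces. A minimal sketch of that contract, assuming `make_csv.py` is importable from the working directory and using placeholder names and scores:

```python
# Illustrative only: build the expected layout, then merge and export it.
import json
import os
import tempfile

from make_csv import make_csv, merge_results

root = tempfile.mkdtemp()
# <input_dir>/<model>/<device>/<precision>/<language>/result.json
leaf = os.path.join(root, "demo-model", "xpu", "sym_int4", "en")
os.makedirs(leaf)
with open(os.path.join(leaf, "result.json"), "w") as f:
    json.dump({"results": 5.1234}, f)  # placeholder perplexity

merged = merge_results(root)  # {'demo-model': {'sym_int4': {'en': {'results': 5.1234}}}}
make_csv(merged, root)        # writes results_<today>.csv under root
```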
python/llm/dev/benchmark/perplexity/make_table.py (101 lines, new file)
@@ -0,0 +1,101 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""
+Usage:
+   python make_table.py <input_dir>
+"""
+
+import logging
+from pytablewriter import MarkdownTableWriter, LatexTableWriter
+import os
+import json
+import sys
+import csv
+import datetime
+
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+def make_table(result_dict):
+    """Generate table of results."""
+    md_writer = MarkdownTableWriter()
+    latex_writer = LatexTableWriter()
+    md_writer.headers = ["Model", "Precision", "en", "zh"]
+    latex_writer.headers = ["Model", "Precision", "en", "zh"]
+
+    languages = ["en", "zh"]
+    values = []
+    for model, model_results in result_dict.items():
+        for precision, prec_results in model_results.items():
+            value = [model, precision]
+            for language in languages:
+                task_results = prec_results.get(language, None)
+                if task_results is None:
+                    value.append("")
+                else:
+                    result = task_results["results"]
+                    value.append("%.4f" % result)
+            values.append(value)
+            # blank out the repeated model label on subsequent rows
+            model = ""
+            precision = ""
+
+    md_writer.value_matrix = values
+    latex_writer.value_matrix = values
+
+    # todo: make latex table look good
+    # print(latex_writer.dumps())
+
+    return md_writer.dumps()
+
+
+def merge_results(path):
+    # loop dirs and subdirs in results dir
+    # for each dir, load json files
+    print('Read from', path)
+    merged_results = dict()
+    for dirpath, dirnames, filenames in os.walk(path):
+        # skip dirs without files
+        if not filenames:
+            continue
+        for filename in sorted([f for f in filenames if f.endswith("result.json")]):
+            path = os.path.join(dirpath, filename)
+            # the directory layout is <model>/<device>/<precision>/<language>/result.json
+            model, device, precision, language = dirpath.split('/')[-4:]
+            with open(path, "r") as f:
+                result_dict = json.load(f)
+            if model not in merged_results:
+                merged_results[model] = dict()
+            if precision not in merged_results[model]:
+                merged_results[model][precision] = dict()
+            merged_results[model][precision][language] = result_dict
+    return merged_results
+
+
+def main(*args):
+    if len(args) > 1:
+        input_path = args[1]
+    else:
+        raise ValueError("Input path is required")
+
+    merged_results = merge_results(input_path)
+    print(make_table(merged_results))
+
+
+if __name__ == "__main__":
+    # when running from the harness, the first argument is the script name;
+    # the second argument must be the input_dir
+    main(*sys.argv)
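As a small, hypothetical example of the expected input (model names and scores below are placeholders, not real results), `make_table` turns the merged dict into a Markdown table:

```python
# Illustrative only: feed make_table a hand-built merged-results dict.
from make_table import make_table

demo = {
    "demo-model": {
        "sym_int4": {"en": {"results": 5.1234}},  # placeholder perplexity
        "fp16": {"en": {"results": 4.5678}},      # placeholder perplexity
    },
}
print(make_table(demo))  # Markdown table with Model, Precision, en, zh columns
```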
python/llm/dev/benchmark/perplexity/run.py

@@ -24,6 +24,7 @@ from ppl import BigDLPPL
 from bigdl.llm.ggml.quantize import ggml_tensor_qtype

 import os
+import json

 def get_arguments():
     parser = argparse.ArgumentParser()
@@ -34,6 +35,7 @@ def get_arguments():
     parser.add_argument("--language", required=False, type=str, default="en", choices=['en', 'zh', 'all'])
     parser.add_argument("--precisions", required=False, type=str, default=None, nargs='+')
     parser.add_argument("--device", type=str, default="xpu")
+    parser.add_argument("--output_path", default=None)
     return parser.parse_args()

@@ -77,6 +79,8 @@ def main():
         encoded_texts.append(encoded_text)

     summary = {}
+    output_path = args.output_path if args.output_path else "results"
+    model_name = os.path.basename(os.path.realpath(args.model_path))
     for precision in args.precisions:
         model_kwargs = {}
         if precision in ggml_tensor_qtype.keys():
@@ -85,9 +89,21 @@ def main():
             model_kwargs['torch_dtype'] = getattr(torch, precision)
         print(model_kwargs)

+        log_dir = f"{output_path}/{model_name}/{args.device}/{precision}/{args.language}"
+        os.makedirs(log_dir, exist_ok=True)
+        results = {}
         ppl_evaluator = BigDLPPL(model_path=args.model_path, device=args.device, **model_kwargs)
         ppl = ppl_evaluator.perplexity_hf(encoded_texts)
         summary[precision] = ppl
+        results['results'] = ppl
+        results['config'] = {"model": model_name, "precision": precision, "device": args.device, "seq_len": args.seq_len, "language": args.language}
+        dumped = json.dumps(results, indent=2)
+        print(dumped)
+
+        if args.output_path:
+            with open(f"{log_dir}/result.json", "w") as f:
+                f.write(dumped)
+
     print(summary)

 if __name__ == "__main__":
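For reference, each run now writes a `result.json` under `<output_path>/<model>/<device>/<precision>/<language>/`. A minimal sketch of its shape, with placeholder values only (the real perplexity and `seq_len` come from the run's arguments):

```python
# Placeholder illustration of the result.json layout written by run.py.
import json

example = {
    "results": 5.1234,          # perplexity score for this precision/language
    "config": {
        "model": "demo-model",  # basename of --model_path (placeholder)
        "precision": "sym_int4",
        "device": "xpu",
        "seq_len": 512,         # placeholder; taken from the seq_len argument
        "language": "en",
    },
}
print(json.dumps(example, indent=2))
```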
python/llm/test/benchmark/perplexity/fp16.csv (8 lines, new file)
@@ -0,0 +1,8 @@
+Index,Model,Precision,en,zh
+0,Llama-2-7b-chat-hf,fp16,4.7019,
+1,chatglm2-6b,fp16,,22.321
+2,chatglm3-6b,fp16,,30.1281
+3,Baichuan2-7B-Chat,fp16,,10.7676
+4,mpt-7b-chat,fp16,5.7882,
+5,falcon-7b-instruct-with-patch,fp16,5.2532,
+6,mistral-7b-v0.1,fp16,3.6597,
python/llm/test/benchmark/perplexity/ppl_csv_to_html.py (206 lines, new file)
@@ -0,0 +1,206 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Python program to convert CSV to HTML Table
+
+import os
+import sys
+import argparse
+import pandas as pd
+from pathlib import Path
+
+
+def highlight_vals(val, max=3.0, color1='red', color2='green', color3='yellow', is_last=False):
+    if isinstance(val, float):
+        if val > max:
+            return 'background-color: %s' % color1
+        elif val <= -max:
+            return 'background-color: %s' % color2
+        elif val != 0.0 and not pd.isna(val) and is_last:
+            return 'background-color: %s' % color3
+    # default: no highlight (also covers non-float values)
+    return ''
+
+
+def nonzero_min(lst):
+    non_zero_lst = [num for num in lst if num > 0.0]
+    return min(non_zero_lst) if non_zero_lst else None
+
+
+def is_diffs_within_normal_range(diff_en, diff_zh, threshold=5.0):
+    return not any(diff < (-threshold) for diff in diff_en + diff_zh if isinstance(diff, float))
+
+
+def add_to_dict(dict, key, value):
+    if key not in dict:
+        dict[key] = []
+    dict[key].append(value)
+
+
+def best_in_dict(dict, key, value):
+    if key in dict:
+        best_value = nonzero_min(dict[key])
+        if best_value < value or value <= 0.0:
+            return best_value
+        return value
+    return value
+
+
+def create_fp16_dict(fp16_path):
+    fp16_df = pd.read_csv(fp16_path)
+    fp16_dict = {}
+    for _, row in fp16_df.iterrows():
+        model = row['Model']
+        # Format the data to have 2 decimal places
+        fp16_dict[model] = {
+            'en': "{:.2f}".format(row['en']),
+            'zh': "{:.2f}".format(row['zh'])
+        }
+    return fp16_dict
+
+
+def calculate_percentage_difference(current, fp16):
+    if fp16 != 'N/A' and current != 'N/A' and float(fp16) != 0:
+        return (float(current) - float(fp16)) / float(fp16) * 100
+    else:
+        return 'N/A'
+
+
+def main():
+    parser = argparse.ArgumentParser(description="convert .csv file to .html file")
+    parser.add_argument("-f", "--folder_path", type=str, dest="folder_path",
+                        help="The directory which stores the .csv file", default="/home/arda/yibo/BigDL/python/llm/dev/benchmark/harness")
+    parser.add_argument("-t", "--threshold", type=float, dest="threshold",
+                        help="the threshold of highlight values", default=3.0)
+    parser.add_argument("-b", "--baseline_path", type=str, dest="baseline_path",
+                        help="the baseline path which stores the baseline.csv file")
+    args = parser.parse_args()
+
+    # fp16.csv is downloaded previously under the parent folder of the folder_path
+    parent_dir = Path(args.folder_path).parent
+    fp16_path = os.path.join(parent_dir, 'fp16.csv')
+    fp16_dict = create_fp16_dict(fp16_path)
+
+    csv_files = []
+    for file_name in os.listdir(args.folder_path):
+        file_path = os.path.join(args.folder_path, file_name)
+        if os.path.isfile(file_path) and file_name.endswith(".csv"):
+            csv_files.append(file_path)
+    csv_files.sort(reverse=True)
+
+    highlight_threshold = args.threshold
+
+    latest_csv = pd.read_csv(csv_files[0], index_col=0)
+    daily_html = os.path.splitext(csv_files[0])[0] + ".html"
+
+    # Reset index
+    latest_csv.reset_index(inplace=True)
+
+    diffs_within_normal_range = True
+
+    # Add display of FP16 values for each model and add percentage difference column
+    for task in ['en', 'zh']:
+        latest_csv[f'{task}_FP16'] = latest_csv['Model'].apply(lambda model: fp16_dict.get(model, {}).get(task, 'N/A'))
+        latest_csv[f'{task}_diff_FP16(%)'] = latest_csv.apply(lambda row: calculate_percentage_difference(row[task], row[f'{task}_FP16']), axis=1)
+
+    if len(csv_files) > 1:
+        if args.baseline_path:
+            previous_csv = pd.read_csv(args.baseline_path, index_col=0)
+        else:
+            previous_csv = pd.read_csv(csv_files[1], index_col=0)
+
+        last_en = [''] * len(latest_csv.index)
+        diff_en = [''] * len(latest_csv.index)
+        last_zh = [''] * len(latest_csv.index)
+        diff_zh = [''] * len(latest_csv.index)
+
+        en = 'en'
+        zh = 'zh'
+
+        csv_dict = {}
+        for csv_file in csv_files:
+            current_csv = pd.read_csv(csv_file, index_col=0)
+            for current_csv_ind, current_csv_row in current_csv.iterrows():
+                current_csv_model = current_csv_row['Model'].strip()
+                current_csv_precision = current_csv_row['Precision'].strip()
+                current_csv_model_en = current_csv_model + '-' + current_csv_precision + '-' + 'en'
+                current_csv_model_zh = current_csv_model + '-' + current_csv_precision + '-' + 'zh'
+                add_to_dict(csv_dict, current_csv_model_en, current_csv_row[en])
+                add_to_dict(csv_dict, current_csv_model_zh, current_csv_row[zh])
+
+        for latest_csv_ind, latest_csv_row in latest_csv.iterrows():
+            latest_csv_model = latest_csv_row['Model'].strip()
+            latest_csv_precision = latest_csv_row['Precision'].strip()
+            latest_en = latest_csv_row[en]
+            latest_zh = latest_csv_row[zh]
+
+            in_previous_flag = False
+
+            for previous_csv_ind, previous_csv_row in previous_csv.iterrows():
+                previous_csv_model = previous_csv_row['Model'].strip()
+                previous_csv_precision = previous_csv_row['Precision'].strip()
+
+                if latest_csv_model == previous_csv_model and latest_csv_precision == previous_csv_precision:
+                    previous_en = previous_csv_row[en]
+                    previous_zh = previous_csv_row[zh]
+                    if previous_en > 0.0 or previous_zh > 0.0:
+                        last_en[latest_csv_ind] = previous_en
+                        diff_en[latest_csv_ind] = round((latest_en - previous_en) * 100 / previous_en, 2)
+                        last_zh[latest_csv_ind] = previous_zh
+                        diff_zh[latest_csv_ind] = round((latest_zh - previous_zh) * 100 / previous_zh, 2)
+                        in_previous_flag = True
+
+            if not in_previous_flag:
+                last_en[latest_csv_ind] = pd.NA
+                diff_en[latest_csv_ind] = pd.NA
+                last_zh[latest_csv_ind] = pd.NA
+                diff_zh[latest_csv_ind] = pd.NA
+
+        latest_csv.insert(loc=9, column='last_en', value=last_en)
+        latest_csv.insert(loc=10, column='diff_en(%)', value=diff_en)
+        latest_csv.insert(loc=11, column='last_zh', value=last_zh)
+        latest_csv.insert(loc=12, column='diff_zh(%)', value=diff_zh)
+
+        diffs_within_normal_range = is_diffs_within_normal_range(diff_en, diff_zh, threshold=highlight_threshold)
+
+        subset1 = ['diff_en(%)', 'diff_zh(%)']
+
+        columns = {'en': '{:.2f}', 'zh': '{:.2f}', 'last_en': '{:.2f}', 'diff_en(%)': '{:.2f}',
+                   'last_zh': '{:.2f}', 'diff_zh(%)': '{:.2f}'}
+
+        latest_csv.drop('Index', axis=1, inplace=True)
+
+        styled_df = latest_csv.style.format(columns).applymap(lambda val: highlight_vals(val, max=3.0, is_last=True), subset=subset1)
+        for task in ['en', 'zh']:
+            styled_df = styled_df.applymap(lambda val: highlight_vals(val, max=highlight_threshold, is_last=False), subset=[f'{task}_diff_FP16(%)'])
+
+        # add css style to restrict width and wrap text
+        styled_df.set_table_styles([{
+            'selector': 'th, td',
+            'props': [('max-width', '88px'), ('word-wrap', 'break-word')]
+        }], overwrite=False)
+
+        html_output = styled_df.set_table_attributes("border=1").to_html()
+
+        with open(daily_html, 'w') as f:
+            f.write(html_output)
+    else:
+        latest_csv.to_html(daily_html)
+
+    if args.baseline_path and not diffs_within_normal_range:
+        print("The diffs are outside the normal range: " + str(highlight_threshold) + "%")
+        return 1
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
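A quick illustration of the coloring rules above, assuming the helpers are imported from `ppl_csv_to_html.py`: diffs beyond the ±threshold go red (regression) or green (improvement), and any other nonzero diff in the last-run columns goes yellow.

```python
# Illustrative only: exercise the highlight/diff helpers.
from ppl_csv_to_html import calculate_percentage_difference, highlight_vals

print(calculate_percentage_difference(5.20, 5.00))  # ~4.0 (% above the fp16 baseline)
print(highlight_vals(4.0, max=3.0))                 # 'background-color: red'
print(highlight_vals(-4.0, max=3.0))                # 'background-color: green'
print(highlight_vals(1.5, max=3.0, is_last=True))   # 'background-color: yellow'
```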
python/llm/test/benchmark/perplexity/update_html_in_parent_folder.py (49 lines, new file)

@@ -0,0 +1,49 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Python program to update HTML in parent folder
+
+import os
+import shutil
+import argparse
+from pathlib import Path
+
+
+def update_html_in_parent_folder(folder_path):
+    # Get parent folder
+    parent_folder = Path(folder_path).parent
+
+    # List all html files under parent folder and delete them
+    for html_file in parent_folder.glob('*.html'):
+        html_file.unlink()
+
+    # Find latest html file under folder_path
+    latest_html_file = max(Path(folder_path).glob('*.html'), key=os.path.getctime, default=None)
+
+    # Copy the latest html file to parent folder
+    if latest_html_file is not None:
+        shutil.copy(latest_html_file, parent_folder)
+        print(latest_html_file.name)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Update HTML in parent folder.")
+    parser.add_argument("-f", "--folder", type=str, help="Path to the folder")
+    args = parser.parse_args()
+
+    update_html_in_parent_folder(args.folder)
+
+
+if __name__ == "__main__":
+    main()
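As a usage sketch (the folder path below is a placeholder), the nightly promotion step reduces to a single call:

```python
# Illustrative only: replace the parent folder's report with the newest one.
from update_html_in_parent_folder import update_html_in_parent_folder

# Deletes any *.html in the parent of the given folder, then copies the
# newest (by creation time) *.html from inside the folder up one level.
update_html_in_parent_folder("/path/to/nightly-accuracy-data")
```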