LLM: add the comparison between latest arc perf test and last one (#9296)
* add the comparison between the latest test and the previous one to the generated HTML
* resolve some review comments
* adjust some code logic
This commit is contained in:
parent 96f8158fe2
commit 03aa368776
2 changed files with 24 additions and 4 deletions
.github/workflows/llm_performance_tests.yml (vendored): 4 changes
@@ -131,6 +131,6 @@ jobs:
 export https_proxy=${HTTPS_PROXY}
 python run.py
 curl -T ./*.csv ${LLM_FTP_URL}/llm/ggml-actions/perf/
+cp ./*.csv /mnt/disk1/nightly_perf/
 cd ../../../test/benchmark
-python csv_to_html.py -f ../../dev/benchmark/all-in-one
-cp ./*.html /mnt/disk1/nightly_perf/
+python csv_to_html.py -f /mnt/disk1/nightly_perf/
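With this change the workflow keeps a copy of every nightly CSV in /mnt/disk1/nightly_perf/ and points csv_to_html.py at that folder, so the script can see the previous run alongside the latest one. Below is a minimal sketch (not part of the commit) of how the two newest result files can be picked out of that folder; it assumes the CSV names embed a sortable timestamp, so that a reverse lexicographic sort puts the newest run first, which is the same assumption the script's sort(reverse=True) relies on.

# Sketch only: locate the latest and previous nightly CSVs.
import glob
import os

NIGHTLY_DIR = "/mnt/disk1/nightly_perf/"   # folder the workflow copies CSVs into


def latest_two_csvs(folder=NIGHTLY_DIR):
    # Reverse lexicographic sort; newest file first if names are date-stamped.
    csv_files = sorted(glob.glob(os.path.join(folder, "*.csv")), reverse=True)
    latest = csv_files[0] if csv_files else None
    previous = csv_files[1] if len(csv_files) > 1 else None
    return latest, previous

If the file names were not date-stamped, sorting by os.path.getmtime would be a safer way to find the two most recent runs.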
csv_to_html.py: 24 changes
@@ -24,7 +24,7 @@ import pandas as pd
 def main():
     parser = argparse.ArgumentParser(description="convert .csv file to .html file")
     parser.add_argument("-f", "--folder_path", type=str, dest="folder_path",
-                        help="The directory which stores the .csv file", default="../../dev/benchmark/all-in-one")
+                        help="The directory which stores the .csv file", default="/mnt/disk1/nightly_perf/")
     args = parser.parse_args()

     csv_files = []
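Because the default folder now points at the nightly directory, the script can be run without any arguments in the workflow. A quick, self-contained check (not from the commit; the override path is made up) that the new default is picked up when -f is omitted:

import argparse

parser = argparse.ArgumentParser(description="convert .csv file to .html file")
parser.add_argument("-f", "--folder_path", type=str, dest="folder_path",
                    help="The directory which stores the .csv file",
                    default="/mnt/disk1/nightly_perf/")

args = parser.parse_args([])                 # no CLI args, default is used
assert args.folder_path == "/mnt/disk1/nightly_perf/"
print(parser.parse_args(["-f", "/tmp/perf"]).folder_path)   # explicit override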
@@ -32,8 +32,28 @@ def main():
         file_path = os.path.join(args.folder_path, file_name)
         if os.path.isfile(file_path) and file_name.endswith(".csv"):
             csv_files.append(file_path)
+    csv_files.sort(reverse=True)

-    a = pd.read_csv(csv_files[0], index_col=0).to_html(csv_files[0].split("/")[-1].split(".")[0]+".html")
+    data1 = pd.read_csv(csv_files[0], index_col=0)
+
+    if len(csv_files)>1:
+        data2 = pd.read_csv(csv_files[1], index_col=0)
+
+        origin_column_1='1st token avg latency (ms)'
+        origin_column_2='2+ avg latency (ms/token)'
+
+        added_column_1='last1'
+        added_column_2='diff1(%)'
+        added_column_3='last2'
+        added_column_4='diff2(%)'
+
+        data1.insert(loc=3,column=added_column_1,value=data2[origin_column_1])
+        data1.insert(loc=4,column=added_column_2,value=round((data2[origin_column_1]-data1[origin_column_1])*100/data2[origin_column_1],2))
+        data1.insert(loc=5,column=added_column_3,value=data2[origin_column_2])
+        data1.insert(loc=6,column=added_column_4,value=round((data2[origin_column_2]-data1[origin_column_2])*100/data2[origin_column_2],2))
+
+    daily_html=csv_files[0].split(".")[0]+".html"
+    data1.to_html(daily_html)

 if __name__ == "__main__":
     sys.exit(main())
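The new block compares the latest run against the previous one column by column: last1 and last2 carry the previous run's latencies, and diff1(%) / diff2(%) are (previous - latest) / previous * 100, so a positive percentage means the latency dropped and the latest run is faster. Below is an illustrative sketch of that arithmetic on two made-up single-row tables (the model name and numbers are invented; the real script reads the two newest CSVs and uses DataFrame.insert to place the new columns at fixed positions).

import pandas as pd

# Toy stand-ins for the two newest nightly CSVs.
latest = pd.DataFrame(
    {"1st token avg latency (ms)": [210.0], "2+ avg latency (ms/token)": [31.0]},
    index=["llama-2-7b"],
)
previous = pd.DataFrame(
    {"1st token avg latency (ms)": [230.0], "2+ avg latency (ms/token)": [33.0]},
    index=["llama-2-7b"],
)

col1 = "1st token avg latency (ms)"
col2 = "2+ avg latency (ms/token)"

# Previous-run values next to the latest ones; positive diff(%) means the
# latency went down, i.e. the latest run is faster.
latest["last1"] = previous[col1]
latest["diff1(%)"] = round((previous[col1] - latest[col1]) * 100 / previous[col1], 2)
latest["last2"] = previous[col2]
latest["diff2(%)"] = round((previous[col2] - latest[col2]) * 100 / previous[col2], 2)

print(latest)                      # diff1(%) -> 8.7, diff2(%) -> 6.06
# latest.to_html("daily.html")     # same kind of table the nightly job publishes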