LLM: check the final csv results for arc perf test (#9684)
* LLM: check the final csv results for arc perf test
* delete useless python script
* change threshold
* revert the llm_performance_tests.yml
parent 68d0c255fc
commit 1f0245039d

2 changed files with 46 additions and 0 deletions

.github/workflows/llm_performance_tests.yml (vendored): 1 addition
@@ -98,6 +98,7 @@ jobs:
           python -m pip install pandas==1.5.3
           python csv_to_html.py -f $CSV_SAVE_PATH
           cd ../../dev/benchmark/all-in-one/
+          python ../../../test/benchmark/check_results.py -n 48
           if [ ${{ github.event.schedule}} ]; then
             curl -T ./*.csv ${LLM_FTP_URL}/llm/nightly_perf/gpu/
           fi
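The added step runs the new check_results.py against the CSVs produced by the all-in-one benchmark and fails the job when the result table is incomplete. A minimal sketch of the gate it applies, assuming the CSVs sit in the current working directory (the committed script below is the authoritative version; this only illustrates the -n 48 expectation):

import glob

import pandas as pd

# The workflow passes "-n 48": the first (sorted) benchmark CSV must contain
# exactly 48 data rows, otherwise the perf job should fail.
EXPECTED_ROWS = 48

csv_files = sorted(glob.glob("./*.csv"))
num_rows = len(pd.read_csv(csv_files[0], index_col=0))
if num_rows != EXPECTED_ROWS:
    raise ValueError(f"expected {EXPECTED_ROWS} perf results, got {num_rows}")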
							
								
								
									
python/llm/test/benchmark/check_results.py (new file): 45 additions
@@ -0,0 +1,45 @@
+#
+# Copyright 2016 The BigDL Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Python program to check if the number of lines in html meets expectation
+
+import os
+import sys
+import argparse
+import pandas as pd
+
+def main():
+    parser = argparse.ArgumentParser(description="check if the number of lines in html meets expectation")
+    parser.add_argument("-n", "--expected_lines", type=int, dest="expected_lines",
+                        help="the number of expected html lines", default=0)
+    parser.add_argument("-f", "--folder_path", type=str, dest="folder_path",
+                        help="The directory which stores the .csv files", default="./")
+    args = parser.parse_args()
+
+    csv_files = []
+    for file_name in os.listdir(args.folder_path):
+        file_path = os.path.join(args.folder_path, file_name)
+        if os.path.isfile(file_path) and file_name.endswith(".csv"):
+            csv_files.append(file_path)
+    csv_files.sort()
+
+    number_of_expected_lines=args.expected_lines
+    num_rows = len(pd.read_csv(csv_files[0], index_col=0))
+    if num_rows!=number_of_expected_lines:
+        raise ValueError("The number of arc perf test results does not match the expected value. Please check carefully.")
+
+if __name__ == "__main__":
+    sys.exit(main())
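To sanity-check the script outside CI, it can be pointed at any folder containing a CSV with a known row count. A small local smoke test, assuming pandas is installed and the command is run from the repository root (the /tmp/perf_check folder, the dummy column names, and the row count of 3 are illustrative only):

import os
import subprocess
import sys

import pandas as pd

# Write a dummy results file with exactly 3 data rows.
os.makedirs("/tmp/perf_check", exist_ok=True)
pd.DataFrame({"model": ["a", "b", "c"], "latency": [1.0, 2.0, 3.0]}).to_csv("/tmp/perf_check/results.csv")

# Passes with "-n 3"; a mismatched count would make the script raise and exit non-zero.
subprocess.run(
    [sys.executable, "python/llm/test/benchmark/check_results.py",
     "-n", "3", "-f", "/tmp/perf_check"],
    check=True,
)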