| author    | duerpei <duep.fnst@fujitsu.com>                  | 2023-03-08 13:48:03 +0800 |
| committer | duerpei <duep.fnst@fujitsu.com>                  | 2023-03-08 13:48:03 +0800 |
| commit    | 0124d938848301f8768d715b20dc1c4af486dd33 (patch) |                           |
| tree      | 70e8e75bf09e15fcdb05084f5c249fd0768fdcf5         |                           |
| parent    | 9648099248359cb00aa0c31d5436b537b0571853 (diff)  |                           |
agl-test-framework: fix bugs in the test framework
Fixed problems:
1. When all the executed test suites are skipped, no new
   summary-report is generated.
2. When at least one test suite is successfully executed, the
   generated summary-report contains every test suite that has
   logs in the "/tmp-log" directory, rather than only the test
   suites that were just executed.
Correction method:
1. Move the function from "conftest.py" to "agl_test_report.py"
   and call it in the "agl-test" script, so that the summary
   generation is always executed.
2. Use a timestamp to record the information of each executed or
   skipped test suite in the "test_list_<timestamp>" file, and
   generate the summary-report from that file (the shape of this
   file is sketched below).
Bug-AGL: SPEC-4345
Signed-off-by: duerpei <duep.fnst@fujitsu.com>
Change-Id: I47bfd09706e37ce6bdc13f3f9f266bc62f74f777
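
To make correction method 2 concrete, here is a sketch of the per-run list file, `/var/run/agl-test/logs/tmp-log/test_list_<TIME_STAMP>.json`, after one executed and one skipped suite. The suite names are illustrative; per `write_run_info()` and `write_skip_info()` in the diff below, an executed suite records the relative path to its `report.json` and a skipped suite records an empty path.

```python
# Illustrative sketch only: the shape of test_list_<TIME_STAMP>.json
# as produced by write_run_info()/write_skip_info(); suite names are examples.
import json

test_list = {
    "aio_stress": {"status": "run", "path": "aio_stress/report.json"},  # executed
    "crashme": {"status": "skip", "path": ""},                          # skipped
}
print(json.dumps(test_list, indent=4))
```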
| -rwxr-xr-x | agl-test                                       |  23 |
| -rw-r--r-- | conftest.py                                    | 172 |
| -rw-r--r-- | plugins/agl_test_base.py                       |  22 |
| -rw-r--r-- | plugins/agl_test_ptest_base.py                 |   6 |
| -rw-r--r-- | plugins/agl_test_report.py                     | 182 |
| -rw-r--r-- | tests/LTP/agl_test_ltp_base.py                 |   6 |
| -rw-r--r-- | tests/LTP/posix_conformance_tests/run_tests.py |   8 |
| -rw-r--r-- | tests/aio_stress/run_tests.py                  |   9 |
| -rw-r--r-- | tests/crashme/run_tests.py                     |   9 |
| -rw-r--r-- | tests/linus_stress/run_tests.py                |   9 |
| -rw-r--r-- | tests/stress_ng/run_tests.py                   |   8 |
11 files changed, 275 insertions, 179 deletions
```diff
diff --git a/agl-test b/agl-test
--- a/agl-test
+++ b/agl-test
@@ -1,5 +1,22 @@
 #!/bin/sh
+# set time stamp
+current_time=`date +%Y_%m_%d_%H_%M_%S`
+export TIME_STAMP=${current_time}
+
+# init file of test list
+test_list_file=test_list_${current_time}.json
+mkdir -p /var/run/agl-test/logs/tmp-log/
+echo "{ }" > /var/run/agl-test/logs/tmp-log/${test_list_file}
+
+# init the dir of log-to-report
+if [[ -e "/var/run/agl-test/logs/log-to-report/summary-report.html" ]]
+then
+    rm /var/run/agl-test/logs/log-to-report/*
+else
+    mkdir -p /var/run/agl-test/logs/log-to-report/
+fi
+
 
 cd /usr/AGL/agl-test/
 
 # default stress level is low
@@ -13,7 +30,9 @@ echo "STRESS_LEVEL: $STRESS_LEVEL"
 moption="-m "
 if [[ $* =~ ${moption} ]]
 then
-    exec pytest "$@"
+    pytest "$@"
 else
-    exec pytest "$@" -m "not dangerous"
+    pytest "$@" -m "not dangerous"
 fi
+
+python3 -c "import plugins.agl_test_report as report; report.generate_total_summary_files('${current_time}')"
diff --git a/conftest.py b/conftest.py
deleted file mode 100644
index fb89772..0000000
--- a/conftest.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# -*- coding:utf-8 -*-
-import pytest
-import json
-import shutil
-import subprocess
-from jinja2 import Environment,FileSystemLoader
-
-import plugins.agl_test_conf as conf
-
-
-@pytest.fixture(scope='session' ,autouse=True)
-def setup_compress_function():
-    #Before the test start, clean the env
-    report_json = conf.TMP_LOGS_DIR + "report.json"
-    output = subprocess.run(['ls',report_json],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
-    if(output.returncode == 0):
-        subprocess.run(['rm',report_json])
-
-    #Makdir of TMP_TEST_REPORT and REPORT_LOGS_DIR
-    subprocess.run(['mkdir','-p',conf.TMP_TEST_REPORT])
-    subprocess.run(['mkdir','-p',conf.REPORT_LOGS_DIR])
-
-    yield
-    #Collect report.json from all test sets to generate a report.json for all the test sets
-    report_files = conf.TMP_LOGS_DIR + "report_files"
-    with open(report_files,'w') as report_f:
-        subprocess.run(['find','-name','report.json'],cwd=conf.TMP_LOGS_DIR,stdout=report_f)
-    report_f.close()
-
-    #Get the summary data and write to report.json file
-    summary_data = get_summary_data(report_files)
-    summary_json = conf.TMP_LOGS_DIR + "/report.json"
-    with open(summary_json, 'w') as summary_file:
-        json.dump(summary_data,summary_file,indent=4,sort_keys=False)
-    summary_file.close()
-
-    #Get zip file name
-    issue = subprocess.getoutput('cat /etc/issue')
-    version = issue[23:-7]
-    machine_name = subprocess.getoutput('uname -n')
-    date = subprocess.getoutput('date +%Y%m%d')
-    zip_name = "agl-test-log-" + version + '-' + machine_name + '-' + date
-
-    #Get summary_html_data
-    summary_html_data = format_html_data(summary_data)
-    summary_html_data["log_zip_name"] = zip_name
-
-    #Get timestamp
-    date_F = subprocess.getoutput("date +%F")
-    summary_html_data["date_F"] = date_F
-    date_T = subprocess.getoutput("date +%T")
-    summary_html_data["date_T"] = date_T
-
-    #Creat summary report in html
-    env = Environment(loader=FileSystemLoader(conf.get_tpl_dir()))
-    template = env.get_template("all_test_suites_tpl.html")
-    html_path = conf.TMP_LOGS_DIR + "test-report/summary-report.html"
-    with open(html_path, "w") as html_file:
-        html_content = template.render(data=summary_html_data)
-        html_file.write(html_content)
-    html_file.close()
-
-    #Copy summary report file
-    source_file = conf.TMP_LOGS_DIR + "test-report/summary-report.html"
-    target_file = conf.REPORT_LOGS_DIR + "summary-report.html"
-    shutil.copyfile(source_file,target_file)
-
-    #Package the test report
-    base_name = conf.REPORT_LOGS_DIR + zip_name
-    root_dir = conf.TMP_LOGS_DIR + "test-report"
"test-report" - shutil.make_archive(base_name,"zip",root_dir) - - -#Summarize all reports.json file -def get_summary_data(report_files): - summary_data = {} - summary_total = summary_passed = summary_failed = summary_skipped = 0 - files = open(report_files) - while True: - report = files.readline() - if not report: - break - report = report[1:-1] - report_json = conf.TMP_LOGS_DIR + report - with open(report_json,'r') as f: - data = json.load(f) - - total = passed = xpassed = failed = xfailed = skipped = 0 - total = data["collected"] - passed = data["passed"] - xpassed = data["xpassed"] - failed = data["failed"] - xfailed = data["xfailed"] - skipped = data["skipped"] - test_status = data["test_status"] - test_name = data["test_name"] - - this_summary = { - 'total': total, - 'passed': passed, - 'xpassed': xpassed, - 'failed': failed, - 'xfailed': xfailed, - 'skipped': skipped, - 'test_status': test_status, - } - summary_data[test_name] = this_summary - - summary_total = summary_total + 1 - if(test_status=="passed"): - summary_passed = summary_passed + 1 - elif(test_status=="failed"): - summary_failed = summary_failed + 1 - else: - summary_skipped = summary_skipped + 1 - f.close() - summary_data["summary"] = { - "summary_total": summary_total, - "summary_passed": summary_passed, - "summary_failed": summary_failed, - "summary_skipped": summary_skipped, - } - - status = "" - if (summary_data["summary"]["summary_total"] == summary_data["summary"]["summary_skipped"]): - status = "skipped" - elif (summary_data["summary"]["summary_failed"] == 0): - status = "passed" - else: - status = "failed" - summary_data["summary"]["status"] = status - - return summary_data - -def format_html_data(summary_data): - html_data = "" - #init all rows - for key in summary_data: - if(key != "summary"): - html_data += "\t\t\t<tbody class=\"" - html_data += summary_data[key]["test_status"] + "\">\n" - html_data += "\t\t\t\t<tr>\n" - html_data += "\t\t\t\t<td class=\"col-result\">" - html_data += str.capitalize(summary_data[key]["test_status"]) - html_data += "</td>\n" - html_data += "\t\t\t\t<td>" + key + "</td>\n" - html_data += "\t\t\t\t<td>" + str(summary_data[key]["total"]) - html_data += "</td>\n" - html_data += "\t\t\t\t<td>" + str(summary_data[key]["passed"]) - html_data += "</td>\n" - html_data += "\t\t\t\t<td>" + str(summary_data[key]["skipped"]) - html_data += "</td>\n" - html_data += "\t\t\t\t<td>" + str(summary_data[key]["failed"]) - html_data += "</td>\n" - html_data += "\t\t\t\t<td>" + str(summary_data[key]["xfailed"]) - html_data += "</td>\n" - html_data += "\t\t\t\t<td>" + str(summary_data[key]["xpassed"]) - html_data += "</td>\n" - html_data += "\t\t\t\t</tr>\n" - html_data += "\t\t\t</tbody>\n" - - summary_data["test_suite_table_html"] = html_data - - #Create summry status in html - summry_status_html = "" - summry_status_html += "\t\t<p>test suite status : <span class=\"" - summry_status_html += summary_data["summary"]["status"] + "\">" - summry_status_html += str.capitalize(summary_data["summary"]["status"]) - summry_status_html += "</span></p>" - summary_data["summry_status_html"] = summry_status_html - - return summary_data diff --git a/plugins/agl_test_base.py b/plugins/agl_test_base.py index 3a0bab4..b30f841 100644 --- a/plugins/agl_test_base.py +++ b/plugins/agl_test_base.py @@ -1,4 +1,6 @@ import pytest +import os +import json import plugins.agl_test_utils as utils import plugins.agl_test_conf as conf @@ -111,3 +113,23 @@ class AGLBaseTest: def precheck(self): return True; + + def 
+    def write_skip_info(self):
+        test_info = {"status":"skip","path":""}
+        self.write_info_to_file(test_info)
+
+    def write_run_info(self):
+        path_str = self.name + "/report.json"
+        test_info = {"status":"run","path":path_str}
+        self.write_info_to_file(test_info)
+
+    def write_info_to_file(self, test_info):
+        time_stamp = os.getenv("TIME_STAMP")
+        test_list = "/var/run/agl-test/logs/tmp-log/test_list_" + time_stamp + ".json"
+        with open(test_list, 'r') as f:
+            test_suites = json.load(f)
+        f.close()
+        with open(test_list, 'w') as f:
+            test_suites[self.name] = test_info
+            json.dump(test_suites,f)
+        f.close()
diff --git a/plugins/agl_test_ptest_base.py b/plugins/agl_test_ptest_base.py
index 4e261bc..16d4938 100644
--- a/plugins/agl_test_ptest_base.py
+++ b/plugins/agl_test_ptest_base.py
@@ -44,9 +44,15 @@ class PTESTBase(AGLBaseTest):
         test_script = pathlib.Path("/usr/lib/" + super().get_name() + "/ptest/run-ptest")
         check_test_script = test_script.is_file()
 
+        if((check_common and check_ptest_cmd and check_test_script) == False):
+            #write test suite info to file
+            self.write_skip_info()
+
         return check_common and check_ptest_cmd and check_test_script
 
     def run_ptest(self):
         if(self.precheck() == True):
             self.run_test_fun()
+            #write test suite info to file
+            self.write_run_info()
             self.log_process()
diff --git a/plugins/agl_test_report.py b/plugins/agl_test_report.py
index ad7b9e6..4376d35 100644
--- a/plugins/agl_test_report.py
+++ b/plugins/agl_test_report.py
@@ -1,6 +1,8 @@
 import json
 import shutil
+import zipfile
 import subprocess
+from jinja2 import Environment,FileSystemLoader
 
 import plugins.agl_test_conf as conf
 
@@ -178,3 +180,183 @@ def format_html_data(summary_data):
     summary_data["test_suite_status_html"] = status_html
 
     return summary_data
+
+def generate_total_summary_files(time_stamp):
+    #Get the summary data and write to report.json file
+    test_list = conf.TMP_LOGS_DIR + "test_list_" + str(time_stamp) + ".json"
+    summary_data = get_summary_data(test_list)
+    if(summary_data['summary']['summary_total'] == 0):
+        return 0
+    summary_json = conf.TMP_LOGS_DIR + "/report.json"
+    with open(summary_json, 'w') as summary_file:
+        json.dump(summary_data,summary_file,indent=4,sort_keys=False)
+    summary_file.close()
+
+    #Get zip file name
+    issue = subprocess.getoutput('cat /etc/issue')
+    version = issue[23:-7]
+    machine_name = subprocess.getoutput('uname -n')
+    zip_name = "agl-test-log-" + version + '-' + machine_name + '-' + time_stamp
+
+    #Get summary_html_data
+    summary_html_data = format_total_summary_html_data(summary_data)
+    summary_html_data["log_zip_name"] = zip_name
+
+    #Get current timestamp for total summary html report
+    date_F = subprocess.getoutput("date +%F")
+    summary_html_data["date_F"] = date_F
+    date_T = subprocess.getoutput("date +%T")
+    summary_html_data["date_T"] = date_T
+
+    #Creat total summary report in html
+    env = Environment(loader=FileSystemLoader(conf.get_tpl_dir()))
+    template = env.get_template("all_test_suites_tpl.html")
+    html_path = conf.TMP_LOGS_DIR + "test-report/summary-report.html"
+    with open(html_path, "w") as html_file:
+        html_content = template.render(data=summary_html_data)
+        html_file.write(html_content)
+    html_file.close()
+
+    #Copy total summary report file
+    source_file = conf.TMP_LOGS_DIR + "test-report/summary-report.html"
+    target_file = conf.REPORT_LOGS_DIR + "summary-report.html"
+    shutil.copyfile(source_file,target_file)
+
+    #Package the test report
+    base_name = conf.REPORT_LOGS_DIR + zip_name
+    root_dir = conf.TMP_LOGS_DIR + "test-report"
+    make_zip_file(test_list, base_name, root_dir)
+
+def make_zip_file(test_list, base_name, root_dir):
+    with open(test_list, 'r') as f:
+        test_suites = json.load(f)
+    f.close()
+    zip_name = base_name + ".zip"
+    zipf = zipfile.ZipFile(zip_name, 'a')
+    summ_file = root_dir + "/" + "summary-report.html"
+    zipf.write(summ_file, "summary-report.html")
+
+    for key in test_suites.keys():
+        sub_dir = root_dir + "/" + key + "/"
+        zipf.write(sub_dir, key)
+        if(test_suites[key]["status"] == "run"):
+            zipf.write(sub_dir+"/log.zip", key+"/log.zip")
+            zipf.write(sub_dir+"/report.html", key+"/report.html")
+    zipf.close()
+
+#Summarize all reports.json file
+def get_summary_data(test_list):
+    summary_data = {}
+    summary_total = summary_passed = summary_failed = summary_skipped = 0
+    with open(test_list,'r') as f:
+        test_suites = json.load(f)
+    f.close()
+
+    for key in test_suites.keys():
+        if(test_suites[key]["status"] == "skip"):
+            this_summary = {
+                'total': "null",
+                'passed': "null",
+                'xpassed': "null",
+                'failed': "null",
+                'xfailed': "null",
+                'skipped': "null",
+                'test_status': "skipped",
+            }
+            summary_data[key] = this_summary
+
+            summary_total = summary_total + 1
+            summary_skipped = summary_skipped + 1
+
+        if(test_suites[key]["status"] == "run"):
+            report = test_suites[key]["path"]
+            report_json = conf.TMP_LOGS_DIR + report
+            with open(report_json,'r') as f:
+                data = json.load(f)
+            f.close()
+
+            total = passed = xpassed = failed = xfailed = skipped = 0
+            total = data["collected"]
+            passed = data["passed"]
+            xpassed = data["xpassed"]
+            failed = data["failed"]
+            xfailed = data["xfailed"]
+            skipped = data["skipped"]
+            test_status = data["test_status"]
+            test_name = data["test_name"]
+
+            this_summary = {
+                'total': total,
+                'passed': passed,
+                'xpassed': xpassed,
+                'failed': failed,
+                'xfailed': xfailed,
+                'skipped': skipped,
+                'test_status': test_status,
+            }
+            summary_data[key] = this_summary
+
+            summary_total = summary_total + 1
+            if(test_status=="passed"):
+                summary_passed = summary_passed + 1
+            elif(test_status=="failed"):
+                summary_failed = summary_failed + 1
+            else:
+                summary_skipped = summary_skipped + 1
+
+    summary_data["summary"] = {
+        "summary_total": summary_total,
+        "summary_passed": summary_passed,
+        "summary_failed": summary_failed,
+        "summary_skipped": summary_skipped,
+    }
+
+    status = ""
+    if (summary_data["summary"]["summary_total"] == summary_data["summary"]["summary_skipped"]):
+        status = "skipped"
+    elif (summary_data["summary"]["summary_failed"] == 0):
+        status = "passed"
+    else:
+        status = "failed"
+    summary_data["summary"]["status"] = status
+
+    return summary_data
+
+def format_total_summary_html_data(summary_data):
+    html_data = ""
+    #init all rows
+    for key in summary_data:
+        if(key != "summary"):
+            html_data += "\t\t\t<tbody class=\""
+            html_data += summary_data[key]["test_status"] + "\">\n"
+            html_data += "\t\t\t\t<tr>\n"
+            html_data += "\t\t\t\t<td class=\"col-result\">"
+            html_data += str.capitalize(summary_data[key]["test_status"])
+            html_data += "</td>\n"
+            html_data += "\t\t\t\t<td>" + key + "</td>\n"
+            html_data += "\t\t\t\t<td>" + str(summary_data[key]["total"])
+            html_data += "</td>\n"
+            html_data += "\t\t\t\t<td>" + str(summary_data[key]["passed"])
+            html_data += "</td>\n"
+            html_data += "\t\t\t\t<td>" + str(summary_data[key]["skipped"])
+            html_data += "</td>\n"
+            html_data += "\t\t\t\t<td>" + str(summary_data[key]["failed"])
+            html_data += "</td>\n"
+            html_data += "\t\t\t\t<td>" + str(summary_data[key]["xfailed"])
+            html_data += "</td>\n"
"\t\t\t\t<td>" + str(summary_data[key]["xpassed"]) + html_data += "</td>\n" + html_data += "\t\t\t\t</tr>\n" + html_data += "\t\t\t</tbody>\n" + + summary_data["test_suite_table_html"] = html_data + + #Create summry status in html + summry_status_html = "" + summry_status_html += "\t\t<p>test suite status : <span class=\"" + summry_status_html += summary_data["summary"]["status"] + "\">" + summry_status_html += str.capitalize(summary_data["summary"]["status"]) + summry_status_html += "</span></p>" + summary_data["summry_status_html"] = summry_status_html + + return summary_data diff --git a/tests/LTP/agl_test_ltp_base.py b/tests/LTP/agl_test_ltp_base.py index 44d5325..982e60d 100644 --- a/tests/LTP/agl_test_ltp_base.py +++ b/tests/LTP/agl_test_ltp_base.py @@ -109,6 +109,10 @@ class LTPBase(AGLBaseTest): test_file = pathlib.Path(self.LTPTEST + self.test_name) check_test_file = test_file.is_file() + if((check_common and check_runltp_script and check_test_file) == False): + #write test suite info to file + self.write_skip_info() + return check_common and check_runltp_script and check_test_file def log_process(self): @@ -146,4 +150,6 @@ class LTPBase(AGLBaseTest): def run_ltp_test(self, case_name): if(self.precheck() == True): self.run_test_fun(case_name) + #write test suite info to file + self.write_run_info() self.log_process() diff --git a/tests/LTP/posix_conformance_tests/run_tests.py b/tests/LTP/posix_conformance_tests/run_tests.py index 5806fca..9d8405d 100644 --- a/tests/LTP/posix_conformance_tests/run_tests.py +++ b/tests/LTP/posix_conformance_tests/run_tests.py @@ -88,6 +88,8 @@ def testbase(): instance = Conformance_Base() # run all cases instance.run_all_tests() + #write test suite info to file + instance.write_run_info() # do log process instance.log_process() yield instance @@ -96,7 +98,11 @@ def testbase(): def local_precheck(): checker = Conformance_Base() - return checker.precheck() + output = checker.precheck() + if(output == False): + #write test suite info to file + instance.write_skip_info() + return output skip_msg = "The current environment does not match the test requirements." pytestmark = pytest.mark.skipif(local_precheck() == False, reason = skip_msg) diff --git a/tests/aio_stress/run_tests.py b/tests/aio_stress/run_tests.py index e7e8b5c..85a3515 100644 --- a/tests/aio_stress/run_tests.py +++ b/tests/aio_stress/run_tests.py @@ -39,6 +39,8 @@ def testbase(): instance = AIOBase() #run test scripts instance.run_test_fun() + #write test suite info to file + instance.write_run_info() yield instance @@ -50,7 +52,12 @@ def setup_module(testbase: AIOBase): def precheck(): instance = AIOBase() - return instance.precheck() + output = instance.precheck() + if(output == False): + #write test suite info to file + instance.write_skip_info() + return output + skip_msg = "The current environment does not match the test requirements." 
 pytestmark = pytest.mark.skipif(precheck() == False, reason = skip_msg)
diff --git a/tests/crashme/run_tests.py b/tests/crashme/run_tests.py
index ccb4775..bedd0e4 100644
--- a/tests/crashme/run_tests.py
+++ b/tests/crashme/run_tests.py
@@ -105,6 +105,9 @@ def testbase():
 
     #run test scripts
     instance.run_test_fun()
+    #write test suite info to file
+    instance.write_run_info()
+
     yield instance
 
     #package log files and make report file
@@ -112,7 +115,11 @@
 
 def precheck():
     instance = CrashmeBase()
-    return instance.precheck()
+    output = instance.precheck()
+    if(output == False):
+        #write test suite info to file
+        instance.write_skip_info()
+    return output
 
 skip_msg = "The current environment does not match the test requirements."
 pytestmark = pytest.mark.skipif(precheck() == False, reason = skip_msg)
diff --git a/tests/linus_stress/run_tests.py b/tests/linus_stress/run_tests.py
index df3157b..22e67e8 100644
--- a/tests/linus_stress/run_tests.py
+++ b/tests/linus_stress/run_tests.py
@@ -43,12 +43,19 @@ def testbase():
 
 def precheck():
     instance = LinusStressBase()
-    return instance.precheck()
+    output = instance.precheck()
+    if(output == False):
+        #write test suite info to file
+        instance.write_skip_info()
+    return output
+
 skip_msg = "The current environment does not match the test requirements."
 pytestmark = pytest.mark.skipif(precheck() == False, reason = skip_msg)
 
 def test_linus_stress(testbase: LinusStressBase):
     testbase.run_test_fun()
+    #write test suite info to file
+    testbase.write_run_info()
     assert testbase.case_info_list['test_linus_stress'][2] == 'passed'
 
 if __name__ == '__main__':
diff --git a/tests/stress_ng/run_tests.py b/tests/stress_ng/run_tests.py
index ec6822b..22ca5c5 100644
--- a/tests/stress_ng/run_tests.py
+++ b/tests/stress_ng/run_tests.py
@@ -35,6 +35,8 @@ def testbase():
     instance = StressngBase()
     #run test scripts
     instance.run_test_fun()
+    #write test suite info to file
+    instance.write_run_info()
 
     yield instance
 
@@ -43,7 +45,11 @@
 
 def precheck():
     instance = StressngBase()
-    return instance.precheck()
+    output = instance.precheck()
+    if(output == False):
+        #write test suite info to file
+        instance.write_skip_info()
+    return output
 
 skip_msg = "The current environment does not match the test requirements."
 pytestmark = pytest.mark.skipif(precheck() == False, reason = skip_msg)
```
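
For reference, a minimal sketch of the flow the updated `agl-test` wrapper now drives: export `TIME_STAMP`, seed the empty test list, let the pytest-run suites register themselves, then build the summary from only the suites recorded in this run. This is an approximation of the shell script above, not part of the patch, and assumes the working directory is /usr/AGL/agl-test/ so that the `plugins` package is importable.

```python
# Sketch of the end-to-end flow introduced by this change
# (an approximation of the agl-test shell wrapper).
import os
import subprocess

time_stamp = subprocess.getoutput("date +%Y_%m_%d_%H_%M_%S")
os.environ["TIME_STAMP"] = time_stamp  # read back by write_info_to_file()

# Seed the empty per-run test list, as the wrapper's `echo "{ }"` does.
tmp_log = "/var/run/agl-test/logs/tmp-log/"
os.makedirs(tmp_log, exist_ok=True)
with open(tmp_log + "test_list_" + time_stamp + ".json", "w") as f:
    f.write("{ }")

# Run the suites; each one appends a "run" or "skip" entry to the list
# via write_run_info()/write_skip_info().
subprocess.run(["pytest", "-m", "not dangerous"])

# Build summary-report.html and the log zip from this run's entries only.
import plugins.agl_test_report as report
report.generate_total_summary_files(time_stamp)
```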