Diffstat (limited to 'plugins/agl_test_base.py')
-rw-r--r-- | plugins/agl_test_base.py | 106 |
1 file changed, 106 insertions, 0 deletions
diff --git a/plugins/agl_test_base.py b/plugins/agl_test_base.py
new file mode 100644
index 0000000..6888d34
--- /dev/null
+++ b/plugins/agl_test_base.py
@@ -0,0 +1,106 @@
+import pytest
+
+import plugins.agl_test_utils as utils
+import plugins.agl_test_conf as conf
+import plugins.agl_test_log as log
+import plugins.agl_test_report as report
+
+class AGLBaseTest:
+
+    name: str
+    case_info_list = dict()
+
+    def __init__(self, name: str):
+        self.name = name
+        utils.create_dir(self.name)
+
+    def get_name(self):
+        return self.name
+
+    def get_temp_logfile(self):
+        return conf.get_log_file(self.name)
+
+    def get_logfile(self):
+        return conf.get_log_file(self.name)
+
+    def get_workdir(self):
+        return conf.WORK_DIR + self.name + "/resource/"
+
+    def append_one_caseinfo(self, name, value, status):
+        self.case_info_list[name] = [name, value, status]
+
+    def get_all_caseinfo(self):
+        return self.case_info_list
+
+    def get_caseinfo_by_name(self, name):
+        return self.case_info_list[name]
+
+    def update_caseinfo_by_name(self, name, case_info):
+        self.case_info_list[name] = case_info
+
+    def log_process(self):
+        logfile = self.get_logfile()
+        self.case_info_list = log.log_process(logfile)
+        self.init_case_status()
+
+    def init_case_status(self):
+        for key in self.case_info_list:
+            case_info = self.case_info_list[key]
+            case_info[2] = "skipped"
+            #if (case_info[1] == "TEST-PASS"):
+            #    case_info[2] = "passed"
+            #if (case_info[1] == "TEST-FAIL"):
+            #    case_info[2] = "failed"
+            #if (case_info[1] == "TEST-SKIP"):
+            #    case_info[2] = "skipped"
+            self.update_caseinfo_by_name(key, case_info)
+
+    def log_report_json(self):
+        #Get case status list
+        #case_status format:
+        #{
+        #    'test_id': 'status',
+        #    'test_id': 'status'
+        #}
+        case_status = report.format_caselist(self.case_info_list)
+
+        #Get the summary status of the test set.
+        #summary format:
+        #{
+        #    'collected': collected_num,
+        #    'passed': passed_num,
+        #    'failed': failed_num,
+        #    'skipped': skipped_num
+        #}
+        summary = report.format_summary(self.case_info_list)
+
+        #Get test set status
+        test_set_status = self.get_test_set_status(summary)
+        #Format data for json log
+        data = report.format_json_data(self.name, test_set_status, summary, case_status)
+        report.write_data_to_file(data)
+
+    #Get test set status
+    #default output:
+    #    passed: no case failed
+    #    failed: one or more cases failed
+    #    skipped: all cases are skipped or no case was run
+    def get_test_set_status(self, summary):
+        #Judge whether the test set passes
+        test_set_status = None
+        if (summary["collected"] == summary["skipped"]):
+            test_set_status = "skipped"
+        elif (summary["failed"] == 0):
+            test_set_status = "passed"
+        else:
+            test_set_status = "failed"
+        return test_set_status
+
+    def log_report(self):
+        self.log_report_json()
+        #Package log file
+        report.log_compress(self.name)
+
+        #Write json data to html
+        report.change_json_to_html(self.name)
+
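For orientation, the sketch below shows how a test-suite plugin might consume this base class from a pytest fixture: construct it with the suite name, let the test cases write their results into the suite's log file, then call log_process() and log_report() at teardown to parse the log and emit the JSON/HTML report. This is a minimal sketch, not part of the commit: the subclass name, fixture name, and the "busybox" suite name are hypothetical, and only the AGLBaseTest methods shown in the diff above are assumed to exist.

    # Hypothetical consumer of AGLBaseTest; names are illustrative only.
    import pytest

    from plugins.agl_test_base import AGLBaseTest

    class BusyboxBase(AGLBaseTest):
        def __init__(self):
            # "busybox" becomes the directory/log name used by
            # utils.create_dir() and conf.get_log_file() in the base class.
            super().__init__(name="busybox")

    @pytest.fixture(scope="module")
    def testbase():
        instance = BusyboxBase()
        yield instance
        # After the test cases have logged their results, parse the log,
        # then write the JSON report, compress the logs, and render HTML.
        instance.log_process()
        instance.log_report()

The design keeps per-case bookkeeping in case_info_list (name, value, status triples keyed by case name), so the reporting helpers in plugins.agl_test_report only ever see one normalized structure regardless of which suite produced the log.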