-rw-r--r--  rpm/rootfs-scripts/parser.py     8
-rw-r--r--  rpm/rootfs-scripts/report.py    26
-rw-r--r--  rpm/rootfs-scripts/run_tests.py 85
3 files changed, 46 insertions, 73 deletions
diff --git a/rpm/rootfs-scripts/parser.py b/rpm/rootfs-scripts/parser.py
deleted file mode 100644
index c6de1af..0000000
--- a/rpm/rootfs-scripts/parser.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from plugins.agl_test_log import log_process_default
-
-
-def log_process(TMP_LOGS_DIR,THIS_TEST):
-    log = TMP_LOGS_DIR + THIS_TEST + "/log/" + THIS_TEST + ".log"
-    test_cases_values_and_status = []
-    test_cases_values_and_status = log_process_default(log)
-    return test_cases_values_and_status
diff --git a/rpm/rootfs-scripts/report.py b/rpm/rootfs-scripts/report.py
deleted file mode 100644
index 8540e69..0000000
--- a/rpm/rootfs-scripts/report.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import plugins.agl_test_report as agl_test_report
-
-def log_report(test_cases_values_and_status,THIS_TEST):
-    #Get case_status, it's looks like : {'test_id': 'status',...}
-    case_status = {}
-    case_status = agl_test_report.get_case_status(test_cases_values_and_status)
-
-    #Get the summary of the test case status, the result is like that:
-    #Summary = [["collected",num1],["passed",num2],["failed",num3],["skipped",num4]]
-    summary = []
-    summary = agl_test_report.get_summary(case_status)
-
-    #Judge whether the test set passes
-    test_set_status = "null"
-    if (summary[1][1] == summary[0][1]):
-        test_set_status = "passed"
-    else:
-        test_set_status = "failed"
-
-    agl_test_report.write_date_to_json(THIS_TEST,test_set_status,summary,case_status)
-
-    #Package log file
-    agl_test_report.log_compress(THIS_TEST)
-
-    html = agl_test_report.get_report_html(THIS_TEST,test_set_status,summary,case_status)
-    agl_test_report.write_to_html_file(THIS_TEST,html)
diff --git a/rpm/rootfs-scripts/run_tests.py b/rpm/rootfs-scripts/run_tests.py
index 8b085c6..c751faf 100644
--- a/rpm/rootfs-scripts/run_tests.py
+++ b/rpm/rootfs-scripts/run_tests.py
@@ -2,53 +2,60 @@ import pytest
import subprocess
import plugins.agl_test_utils as agl_test_utils
-import plugins.agl_test_conf as agl_test_conf
-import rpm.parser as parser
-import rpm.report as report
-
-WORK_DIR = agl_test_conf.WORK_DIR
-TMP_LOGS_DIR = agl_test_conf.TMP_LOGS_DIR
-
-THIS_TEST = "rpm"
-test_cases_values_and_status = []
-
-def setup_module():
+from plugins.agl_test_base import AGLBaseTest
+class RPMBase(AGLBaseTest):
+    def __init__(self):
+        super().__init__(name="rpm")
+
+    def run_case(self, case_id):
+        case_info = self.get_caseinfo_by_name(case_id)
+        if (case_info[1] == "TEST-PASS"):
+            case_info[2] = "passed"
+        else:
+            case_info[2] = "failed"
+        self.update_caseinfo_by_name(case_id, case_info)
+        assert case_info[2] == "passed"
+
+    #Run the test script and redirect its output into rpm.log
+    def run_test_fun(self):
+        log_file = self.get_logfile()
+        cwd = self.get_workdir()
+        with open(log_file,'w') as log_f:
+            subprocess.run(['sh','rpm_test.sh'],cwd=cwd,stdout=log_f,stderr=log_f)
+        log_f.close()
+
+@pytest.fixture(scope='module')
+def testbase():
+    #init an instance for this test suite
+    instance = RPMBase()
+    #run the test script
+    instance.run_test_fun()
+    #parse the log
+    instance.log_process()
+
+    yield instance
+
+    #package the log files and generate the report file
+    instance.log_report()
+
+def setup_module(testbase: RPMBase):
    agl_test_utils.find_cmd("rpm")
-    agl_test_utils.create_dir(THIS_TEST)
-    run_test_fun()
-    global test_cases_values_and_status
-    test_cases_values_and_status = parser.log_process(TMP_LOGS_DIR,THIS_TEST)
-
-#Run test, and redirect the log into the file of THIS_TEST.log under TMP_LOGS_DIR/THIS_TEST/
-def run_test_fun():
-    log_file = agl_test_conf.get_log_file(THIS_TEST)
-    cwd = WORK_DIR + THIS_TEST + "/resource/"
-    with open(log_file,'w') as log_f:
-        subprocess.run(['sh','rpm_test.sh'],cwd=cwd,stdout=log_f,stderr=log_f)
-    log_f.close()
@pytest.mark.oss_default
-def test_rpm01():
-    global test_cases_values_and_status
-    assert test_cases_values_and_status[1][1] == "TEST-PASS"
-    test_cases_values_and_status[1][2] = "passed"
+def test_rpm01(testbase: RPMBase):
+    name = "rpm01"
+    testbase.run_case(name)
@pytest.mark.oss_default
-def test_rpm02():
-    global test_cases_values_and_status
-    assert test_cases_values_and_status[2][1] == "TEST-PASS"
-    test_cases_values_and_status[2][2] = "passed"
+def test_rpm02(testbase: RPMBase):
+    name = "rpm02"
+    testbase.run_case(name)
@pytest.mark.oss_default
-def test_rpm03():
-    global test_cases_values_and_status
-    assert test_cases_values_and_status[3][1] == "TEST-PASS"
-    test_cases_values_and_status[3][2] = "passed"
-
-#Pack the log file and count the test results
-def teardown_module():
-    report.log_report(test_cases_values_and_status,THIS_TEST)
+def test_rpm03(testbase: RPMBase):
+    name = "rpm03"
+    testbase.run_case(name)
if __name__ == '__main__':
    pytest.main("-s run_tests")
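
For context, the sketch below shows how the AGLBaseTest class/fixture pattern introduced in this change could be reused by another suite. It only uses helpers that appear in the diff above (run_test_fun, run_case, log_process, log_report, get_logfile, get_workdir, get_caseinfo_by_name, update_caseinfo_by_name); the suite name "foo", the script foo_test.sh, and the case id "foo01" are hypothetical placeholders, not part of this commit.

# Minimal sketch, assuming an AGLBaseTest base class with the helpers used
# in run_tests.py above. "foo", foo_test.sh and "foo01" are placeholders.
import subprocess

import pytest
from plugins.agl_test_base import AGLBaseTest


class FooBase(AGLBaseTest):
    def __init__(self):
        super().__init__(name="foo")

    def run_case(self, case_id):
        #Mark the case passed/failed based on the parsed log entry
        case_info = self.get_caseinfo_by_name(case_id)
        case_info[2] = "passed" if case_info[1] == "TEST-PASS" else "failed"
        self.update_caseinfo_by_name(case_id, case_info)
        assert case_info[2] == "passed"

    def run_test_fun(self):
        #Run the suite script and capture stdout/stderr in the suite log
        with open(self.get_logfile(), 'w') as log_f:
            subprocess.run(['sh', 'foo_test.sh'], cwd=self.get_workdir(),
                           stdout=log_f, stderr=log_f)


@pytest.fixture(scope='module')
def testbase():
    instance = FooBase()
    instance.run_test_fun()   #run the test script
    instance.log_process()    #parse the log into per-case status
    yield instance
    instance.log_report()     #package the logs and write the report


@pytest.mark.oss_default
def test_foo01(testbase: FooBase):
    testbase.run_case("foo01")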