-rw-r--r--  glib2/rootfs-scripts/__init__.py                            0
-rw-r--r--  glib2/rootfs-scripts/parser.py                              7
-rw-r--r--  glib2/rootfs-scripts/report.py                             26
-rw-r--r--  glib2/rootfs-scripts/run_tests.py                          56
-rw-r--r--  rpm/rootfs-scripts/__init__.py                              0
-rwxr-xr-x  rpm/rootfs-scripts/resource/rpm_test.sh                     4
-rw-r--r--  rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm  bin 3293 -> 0 bytes
-rw-r--r--  rpm/rootfs-scripts/resource/tests/rpm_01.sh                13
-rw-r--r--  rpm/rootfs-scripts/resource/tests/rpm_02.sh                21
-rw-r--r--  rpm/rootfs-scripts/resource/tests/rpm_03.sh                14
-rw-r--r--  rpm/rootfs-scripts/run_tests.py                            61
11 files changed, 0 insertions, 202 deletions
diff --git a/glib2/rootfs-scripts/__init__.py b/glib2/rootfs-scripts/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/glib2/rootfs-scripts/__init__.py
+++ /dev/null
diff --git a/glib2/rootfs-scripts/parser.py b/glib2/rootfs-scripts/parser.py
deleted file mode 100644
index 33aded3..0000000
--- a/glib2/rootfs-scripts/parser.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from plugins.agl_test_log import log_process_gnome_desktop_testing
-
-def log_process(TMP_LOGS_DIR,THIS_TEST):
- log = TMP_LOGS_DIR + THIS_TEST + "/log/" + THIS_TEST + ".log"
- test_cases_values_and_status = []
- test_cases_values_and_status = log_process_gnome_desktop_testing(log)
- return test_cases_values_and_status
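
For reference, the entries returned by log_process() are consumed positionally by the callers below: item[0] is the test id, item[1] the raw status parsed from the log, and item[2] the pytest verdict. A minimal sketch of that shape, with illustrative values inferred from check_status() in run_tests.py (the exact field contents are an assumption, not taken from the plugin):

    # Assumed shape of test_cases_values_and_status; values are illustrative,
    # inferred from how check_status() in run_tests.py indexes each entry.
    test_cases_values_and_status = [
        ["glib/rand.test", "PASS", "passed"],    # [test_id, log status, verdict]
        ["glib/base64.test", "FAIL", "failed"],
    ]
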
diff --git a/glib2/rootfs-scripts/report.py b/glib2/rootfs-scripts/report.py
deleted file mode 100644
index 8540e69..0000000
--- a/glib2/rootfs-scripts/report.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import plugins.agl_test_report as agl_test_report
-
-def log_report(test_cases_values_and_status,THIS_TEST):
-    #Get case_status; it looks like: {'test_id': 'status',...}
- case_status = {}
- case_status = agl_test_report.get_case_status(test_cases_values_and_status)
-
-    #Get the summary of the test case statuses; the result looks like:
- #Summary = [["collected",num1],["passed",num2],["failed",num3],["skipped",num4]]
- summary = []
- summary = agl_test_report.get_summary(case_status)
-
-    #Determine whether the test set passed
-    test_set_status = "null"
-    if summary[1][1] == summary[0][1]:
-        test_set_status = "passed"
-    else:
-        test_set_status = "failed"
-
- agl_test_report.write_date_to_json(THIS_TEST,test_set_status,summary,case_status)
-
-    #Package the log file
- agl_test_report.log_compress(THIS_TEST)
-
- html = agl_test_report.get_report_html(THIS_TEST,test_set_status,summary,case_status)
- agl_test_report.write_to_html_file(THIS_TEST,html)
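
The pass/fail judgement above hinges on summary[1][1] (the passed count) equaling summary[0][1] (the collected count): the set passes only when every collected case passed. A short sketch of the structures involved, following the shapes documented in the comments (values are illustrative):

    # Shapes as documented in the comments above; values are illustrative.
    case_status = {"glib/rand.test": "passed", "glib/base64.test": "failed"}
    summary = [["collected", 2], ["passed", 1], ["failed", 1], ["skipped", 0]]

    # passed (1) != collected (2), so this set would be reported as "failed".
    test_set_status = "passed" if summary[1][1] == summary[0][1] else "failed"
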
diff --git a/glib2/rootfs-scripts/run_tests.py b/glib2/rootfs-scripts/run_tests.py
deleted file mode 100644
index 2387b95..0000000
--- a/glib2/rootfs-scripts/run_tests.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import pytest
-import subprocess
-
-import plugins.agl_test_conf as agl_test_conf
-import plugins.agl_test_utils as agl_test_utils
-
-import glib2.parser as parser
-import glib2.report as report
-
-TMP_LOGS_DIR = agl_test_conf.TMP_LOGS_DIR
-
-THIS_TEST = "glib2"
-test_cases_values_and_status = []
-
-def setup_module():
- agl_test_utils.create_dir(THIS_TEST)
- run_test_fun()
- global test_cases_values_and_status
- test_cases_values_and_status = parser.log_process(TMP_LOGS_DIR,THIS_TEST)
-
-#Run the test and redirect its log to THIS_TEST.log under TMP_LOGS_DIR/THIS_TEST/
-def run_test_fun():
- log_file = agl_test_conf.get_log_file(THIS_TEST)
- with open(log_file,'w') as log_f:
- subprocess.run(['ptest-runner','glib-2.0'],stdout=log_f,stderr=log_f)
- log_f.close()
-
-def check_status(test_name):
- global test_cases_values_and_status
- for item in test_cases_values_and_status:
-        if item[0] == test_name:
-            if item[1] == "PASS":
-                item[2] = "passed"
-                return 1
-            if item[1] == "FAIL":
- item[2] = "failed"
- return 0
-
-def test_glib2_gdbus_names():
- assert check_status("glib/gdbus-names.test")
-
-def test_glib2_rand():
- assert check_status("glib/rand.test")
-
-def test_glib2_base64():
- assert check_status("glib/base64.test")
-
-#TODO
-#Complete all test cases
-
-#Package the log file and tally the test results
-def teardown_module():
- report.log_report(test_cases_values_and_status,THIS_TEST)
-
-if __name__ == '__main__':
- pytest.main("run_tests")
diff --git a/rpm/rootfs-scripts/__init__.py b/rpm/rootfs-scripts/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/rpm/rootfs-scripts/__init__.py
+++ /dev/null
diff --git a/rpm/rootfs-scripts/resource/rpm_test.sh b/rpm/rootfs-scripts/resource/rpm_test.sh
deleted file mode 100755
index dd5ce37..0000000
--- a/rpm/rootfs-scripts/resource/rpm_test.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-for i in tests/*.sh; do
-    sh "$i"
-done
diff --git a/rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm b/rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm
deleted file mode 100644
index d2e681c..0000000
--- a/rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm
+++ /dev/null
Binary files differ
diff --git a/rpm/rootfs-scripts/resource/tests/rpm_01.sh b/rpm/rootfs-scripts/resource/tests/rpm_01.sh
deleted file mode 100644
index 6ab9e3d..0000000
--- a/rpm/rootfs-scripts/resource/tests/rpm_01.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-
-# The test script checks the following options of the rpm command:
-# 1) Option help
-
-test="rpm01"
-
-if rpm --help | grep ".*Usage.*"
-then
- echo " -> $test: TEST-PASS"
-else
- echo " -> $test: TEST-FAIL"
-fi;
diff --git a/rpm/rootfs-scripts/resource/tests/rpm_02.sh b/rpm/rootfs-scripts/resource/tests/rpm_02.sh
deleted file mode 100644
index 9b0f411..0000000
--- a/rpm/rootfs-scripts/resource/tests/rpm_02.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/sh
-
-# The test script checks the following options of the rpm command:
-# 1) Option ql
-
-test="rpm02"
-test_manual="test-manual-1.2.3.noarch"
-
-if rpm -qa | grep "$test_manual"
-then
-    rpm -e "$test_manual"
-fi
-
-rpm -ivh "${test_manual}.rpm" --nodeps
-
-if rpm -ql "$test_manual" | grep '.*/home/test/rpm-test/text1.txt.*'
-then
- echo " -> $test: TEST-PASS"
-else
- echo " -> $test: TEST-FAIL"
-fi;
diff --git a/rpm/rootfs-scripts/resource/tests/rpm_03.sh b/rpm/rootfs-scripts/resource/tests/rpm_03.sh
deleted file mode 100644
index 51d5b95..0000000
--- a/rpm/rootfs-scripts/resource/tests/rpm_03.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-
-# The test script checks the following options of the rpm command:
-# 1) Option qi
-
-test="rpm03"
-test_manual="test-manual-1.2.3.noarch"
-
-if rpm -qi "$test_manual" | grep ".*1.2.3.*"
-then
- echo " -> $test: TEST-PASS"
-else
- echo " -> $test: TEST-FAIL"
-fi;
diff --git a/rpm/rootfs-scripts/run_tests.py b/rpm/rootfs-scripts/run_tests.py
deleted file mode 100644
index c751faf..0000000
--- a/rpm/rootfs-scripts/run_tests.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import pytest
-import subprocess
-
-import plugins.agl_test_utils as agl_test_utils
-
-from plugins.agl_test_base import AGLBaseTest
-class RPMBase(AGLBaseTest):
- def __init__(self):
- super().__init__(name="rpm")
-
- def run_case(self, case_id):
- case_info = self.get_caseinfo_by_name(case_id)
-        if case_info[1] == "TEST-PASS":
-            case_info[2] = "passed"
-        else:
-            case_info[2] = "failed"
- self.update_caseinfo_by_name(case_id, case_info)
- assert case_info[2] == "passed"
-
-    #Run the test and redirect its log to rpm.log
- def run_test_fun(self):
- log_file = self.get_logfile()
- cwd = self.get_workdir()
- with open(log_file,'w') as log_f:
- subprocess.run(['sh','rpm_test.sh'],cwd=cwd,stdout=log_f,stderr=log_f)
- log_f.close()
-
-@pytest.fixture(scope='module')
-def testbase():
-    #Init the test instance
- instance = RPMBase()
- #run test scripts
- instance.run_test_fun()
-    #Parse the log
- instance.log_process()
-
- yield instance
-
-    #Package the log files and generate the report file
- instance.log_report()
-
-def setup_module():
- agl_test_utils.find_cmd("rpm")
-
-@pytest.mark.oss_default
-def test_rpm01(testbase: RPMBase):
- name = "rpm01"
- testbase.run_case(name)
-
-@pytest.mark.oss_default
-def test_rpm02(testbase: RPMBase):
- name = "rpm02"
- testbase.run_case(name)
-
-@pytest.mark.oss_default
-def test_rpm03(testbase: RPMBase):
- name = "rpm03"
- testbase.run_case(name)
-
-if __name__ == '__main__':
- pytest.main("-s run_tests")