Diffstat (limited to 'rpm')
 -rw-r--r--  rpm/rootfs-scripts/__init__.py                             |   0
 -rw-r--r--  rpm/rootfs-scripts/parser.py                               |   8
 -rw-r--r--  rpm/rootfs-scripts/report.py                               |  26
 -rwxr-xr-x  rpm/rootfs-scripts/resource/rpm_test.sh                    |   4
 -rw-r--r--  rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm   | bin 0 -> 3293 bytes
 -rw-r--r--  rpm/rootfs-scripts/resource/tests/rpm_01.sh                |  13
 -rw-r--r--  rpm/rootfs-scripts/resource/tests/rpm_02.sh                |  21
 -rw-r--r--  rpm/rootfs-scripts/resource/tests/rpm_03.sh                |  14
 -rw-r--r--  rpm/rootfs-scripts/run_tests.py                            |  54
9 files changed, 140 insertions, 0 deletions
diff --git a/rpm/rootfs-scripts/__init__.py b/rpm/rootfs-scripts/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/rpm/rootfs-scripts/__init__.py
diff --git a/rpm/rootfs-scripts/parser.py b/rpm/rootfs-scripts/parser.py
new file mode 100644
index 0000000..c6de1af
--- /dev/null
+++ b/rpm/rootfs-scripts/parser.py
@@ -0,0 +1,8 @@
+from plugins.agl_test_log import log_process_default
+
+
+def log_process(TMP_LOGS_DIR,THIS_TEST):
+ log = TMP_LOGS_DIR + THIS_TEST + "/log/" + THIS_TEST + ".log"
+ test_cases_values_and_status = []
+ test_cases_values_and_status = log_process_default(log)
+ return test_cases_values_and_status
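Note: log_process builds the log path by plain string concatenation, so TMP_LOGS_DIR is expected to end with a trailing slash. A minimal sketch with purely illustrative values (the real value comes from plugins.agl_test_conf):

    TMP_LOGS_DIR = "/tmp/agl-test/logs/"   # hypothetical value, for illustration only
    THIS_TEST = "rpm"
    log = TMP_LOGS_DIR + THIS_TEST + "/log/" + THIS_TEST + ".log"
    # -> "/tmp/agl-test/logs/rpm/log/rpm.log"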
diff --git a/rpm/rootfs-scripts/report.py b/rpm/rootfs-scripts/report.py
new file mode 100644
index 0000000..28f2ac5
--- /dev/null
+++ b/rpm/rootfs-scripts/report.py
@@ -0,0 +1,26 @@
+import plugins.agl_test_report as agl_test_report
+
+def log_report(test_cases_values_and_status,THIS_TEST):
+    #Get case_status; it looks like: {'test_id': 'status',...}
+ case_status = {}
+ case_status = agl_test_report.get_case_status(test_cases_values_and_status)
+
+    #Get the summary of the test case status; the result looks like:
+ #Summary = [["collected",num1],["passed",num2],["failed",num3],["skipped",num4]]
+ summary = []
+ summary = agl_test_report.get_summary(case_status)
+
+    #Determine whether the whole test set passed
+ test_set_status = "null"
+ if (summary[1][1] == summary[0][1]):
+ test_set_status = "passed"
+ else:
+ test_set_status = "failed"
+
+ agl_test_report.write_date_to_json(test_set_status,THIS_TEST,summary,case_status)
+
+ #Package log file
+ agl_test_report.log_compress(THIS_TEST)
+
+ html = agl_test_report.get_report_html(THIS_TEST,test_set_status,summary,case_status)
+ agl_test_report.write_to_html_file(THIS_TEST,html)
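Note: the pass criterion above compares summary[1][1] (the "passed" count) with summary[0][1] (the "collected" count), so the set is reported as passed only when every collected case passed. A hypothetical illustration of the shapes described in the comments:

    case_status = {"rpm01": "passed", "rpm02": "passed", "rpm03": "failed"}   # illustrative only
    summary = [["collected", 3], ["passed", 2], ["failed", 1], ["skipped", 0]]
    test_set_status = "passed" if summary[1][1] == summary[0][1] else "failed"
    # -> "failed": only 2 of the 3 collected cases passed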
diff --git a/rpm/rootfs-scripts/resource/rpm_test.sh b/rpm/rootfs-scripts/resource/rpm_test.sh
new file mode 100755
index 0000000..dd5ce37
--- /dev/null
+++ b/rpm/rootfs-scripts/resource/rpm_test.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+for i in tests/*.sh; do
+ sh $i
+done
diff --git a/rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm b/rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm
new file mode 100644
index 0000000..d2e681c
--- /dev/null
+++ b/rpm/rootfs-scripts/resource/test-manual-1.2.3.noarch.rpm
Binary files differ
diff --git a/rpm/rootfs-scripts/resource/tests/rpm_01.sh b/rpm/rootfs-scripts/resource/tests/rpm_01.sh
new file mode 100644
index 0000000..6ab9e3d
--- /dev/null
+++ b/rpm/rootfs-scripts/resource/tests/rpm_01.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+# This test script checks the following option of the rpm command:
+# 1) Option --help
+
+test="rpm01"
+
+if rpm --help | grep ".*Usage.*"
+then
+ echo " -> $test: TEST-PASS"
+else
+ echo " -> $test: TEST-FAIL"
+fi;
diff --git a/rpm/rootfs-scripts/resource/tests/rpm_02.sh b/rpm/rootfs-scripts/resource/tests/rpm_02.sh
new file mode 100644
index 0000000..9b0f411
--- /dev/null
+++ b/rpm/rootfs-scripts/resource/tests/rpm_02.sh
@@ -0,0 +1,21 @@
+#!/bin/sh
+
+# This test script checks the following option of the rpm command:
+# 1) Option -ql
+
+test="rpm02"
+test_manual="test-manual-1.2.3.noarch"
+
+if rpm -qa | grep $test_manual
+then
+ rpm -e $test_manual
+fi
+
+rpm -ivh ${test_manual}.rpm --nodeps
+
+if rpm -ql $test_manual | grep '.*/home/test/rpm-test/text1.txt.*'
+then
+ echo " -> $test: TEST-PASS"
+else
+ echo " -> $test: TEST-FAIL"
+fi;
diff --git a/rpm/rootfs-scripts/resource/tests/rpm_03.sh b/rpm/rootfs-scripts/resource/tests/rpm_03.sh
new file mode 100644
index 0000000..51d5b95
--- /dev/null
+++ b/rpm/rootfs-scripts/resource/tests/rpm_03.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+# This test script checks the following option of the rpm command:
+# 1) Option -qi
+
+test="rpm03"
+test_manual="test-manual-1.2.3.noarch"
+
+if rpm -qi $test_manual | grep ".*1.2.3.*"
+then
+ echo " -> $test: TEST-PASS"
+else
+ echo " -> $test: TEST-FAIL"
+fi;
diff --git a/rpm/rootfs-scripts/run_tests.py b/rpm/rootfs-scripts/run_tests.py
new file mode 100644
index 0000000..8b085c6
--- /dev/null
+++ b/rpm/rootfs-scripts/run_tests.py
@@ -0,0 +1,54 @@
+import pytest
+import subprocess
+
+import plugins.agl_test_utils as agl_test_utils
+import plugins.agl_test_conf as agl_test_conf
+
+import rpm.parser as parser
+import rpm.report as report
+
+WORK_DIR = agl_test_conf.WORK_DIR
+TMP_LOGS_DIR = agl_test_conf.TMP_LOGS_DIR
+
+THIS_TEST = "rpm"
+test_cases_values_and_status = []
+
+def setup_module():
+ agl_test_utils.find_cmd("rpm")
+ agl_test_utils.create_dir(THIS_TEST)
+ run_test_fun()
+ global test_cases_values_and_status
+ test_cases_values_and_status = parser.log_process(TMP_LOGS_DIR,THIS_TEST)
+
+#Run the test script and redirect its output to THIS_TEST.log under TMP_LOGS_DIR/THIS_TEST/
+def run_test_fun():
+ log_file = agl_test_conf.get_log_file(THIS_TEST)
+ cwd = WORK_DIR + THIS_TEST + "/resource/"
+ with open(log_file,'w') as log_f:
+ subprocess.run(['sh','rpm_test.sh'],cwd=cwd,stdout=log_f,stderr=log_f)
+ log_f.close()
+
+@pytest.mark.oss_default
+def test_rpm01():
+ global test_cases_values_and_status
+ assert test_cases_values_and_status[1][1] == "TEST-PASS"
+ test_cases_values_and_status[1][2] = "passed"
+
+@pytest.mark.oss_default
+def test_rpm02():
+ global test_cases_values_and_status
+ assert test_cases_values_and_status[2][1] == "TEST-PASS"
+ test_cases_values_and_status[2][2] = "passed"
+
+@pytest.mark.oss_default
+def test_rpm03():
+ global test_cases_values_and_status
+ assert test_cases_values_and_status[3][1] == "TEST-PASS"
+ test_cases_values_and_status[3][2] = "passed"
+
+#Package the log files and summarize the test results
+def teardown_module():
+ report.log_report(test_cases_values_and_status,THIS_TEST)
+
+if __name__ == '__main__':
+    pytest.main(["-s", "run_tests.py"])
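Note: the test functions index test_cases_values_and_status starting at 1, which assumes the list returned via plugins.agl_test_log reserves entry 0; that layout is defined by the plugin and is not shown here. The oss_default mark also has to be registered with pytest to avoid unknown-mark warnings (the AGL test plugins may already do this); a minimal, hypothetical registration sketch:

    # conftest.py -- illustrative only; the framework's plugins may already register this mark
    def pytest_configure(config):
        config.addinivalue_line("markers", "oss_default: tests run in the default OSS test set")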