summaryrefslogtreecommitdiffstats
path: root/plugins/agl_test_report.py
diff options
context:
space:
mode:
authorduerpei <duep.fnst@fujitsu.com>2022-07-28 10:34:43 +0800
committerduerpei <duep.fnst@fujitsu.com>2022-07-28 10:34:43 +0800
commitaa5fab53993f29311f1aef83488eb0f759dabca8 (patch)
treea8f561e714feaa48c577c24b062fef8fe9c9b2d3 /plugins/agl_test_report.py
parent76665693bf19bdbe159849b43cc42142d3093c2f (diff)
agl-test-framework: demo code submissionneedlefish_13.93.0needlefish/13.93.013.93.0
Submit the demo code of agl-test-framework The "agl-test framework" encapsulates pytest,which aims to provide a unified test set execution entrance. It supports to run various test sets, even these test sets come from different test frameworks, processing these test logs uniformly, and generating complete test report. In this way, it is convenient to test as many targets as possible, in a wide range, so that the test covers a wider range of objects, and is more comprehensive. At present, we plan to support the following test sets in "agl-test": 1. Transplant test sets under Fuego and AGL-JTA 2. Retain the test set under pyagl and agl-ptest (so will depend on "agl-ptest") 3. Migrate new test sets (with upstream) 4. Append new test sets (without upstream) The output of test run is summarized by levels. The first level is the summary of all test sets, and the second level is the summary of a single test set. Now, they are displayed in HTML format, and other formats also can be considered later. Bug-AGL: SPEC-4345 Signed-off-by: duerpei <duep.fnst@fujitsu.com> Change-Id: I25dfedcf8cdd373544c4fae677330defb5d21840
Diffstat (limited to 'plugins/agl_test_report.py')
-rw-r--r--plugins/agl_test_report.py120
1 files changed, 120 insertions, 0 deletions
diff --git a/plugins/agl_test_report.py b/plugins/agl_test_report.py
new file mode 100644
index 0000000..cee1853
--- /dev/null
+++ b/plugins/agl_test_report.py
@@ -0,0 +1,120 @@
+import json
+import shutil
+
+from plugins.agl_test_conf import REPORT_LOGS_DIR
+from plugins.agl_test_conf import TMP_LOGS_DIR
+
+
#Compress the tmp log to .zip, and store the zip file under TMP_LOGS_DIR/test-report
def log_compress(THIS_TEST):
    """Zip this test's tmp log directory.

    Archives TMP_LOGS_DIR/<THIS_TEST>/log into
    TMP_LOGS_DIR/test-report/<THIS_TEST>/log.zip.
    NOTE(review): assumes TMP_LOGS_DIR ends with a path separator — the
    paths are built by plain string concatenation; confirm in agl_test_conf.
    """
    archive_base = "".join([TMP_LOGS_DIR, "test-report/", THIS_TEST, "/log"])
    source_dir = "".join([TMP_LOGS_DIR, THIS_TEST, "/log"])
    shutil.make_archive(archive_base, 'zip', source_dir)
+
+
#Get all test cases status
#The type of test_cases_values_and_status is list,it's looks like that:
#[['test_id', 'values', 'status'], ['rpm01', 'TEST-PASS', 'passed'],....]
#The type of case_status is dictionary,it's looks like:
#{'rpm03': 'passed', 'rpm02': 'passed', 'rpm01': 'passed'}
def get_case_status(test_cases_values_and_status):
    """Map each test case id to its final status.

    Args:
        test_cases_values_and_status: list of rows where row 0 is a header
            (['test_id', 'values', 'status']) and every following row is
            [test_id, values, status].

    Returns:
        dict mapping test_id -> status, e.g. {'rpm01': 'passed'}.
        An empty or header-only input yields an empty dict.
    """
    # Slice off the header row and build the mapping in one pass instead
    # of a manual index loop with an "if i == 0: continue" guard.
    return {row[0]: row[2] for row in test_cases_values_and_status[1:]}
+
+
#Case_status is a dictionary type of data,Record the test name/id and final results of all test cases
#Get the summary of the test case status, the result is like that:
#Summary = [["collected",3],["passed",3],["failed",0],["skipped",0]]
def get_summary(case_status):
    """Summarize case statuses into collected/passed/failed/skipped counts.

    Any status other than "passed" or "failed" is counted as skipped,
    matching how pytest outcomes are bucketed here.
    """
    statuses = list(case_status.values())
    collected_num = len(statuses)
    passed_num = statuses.count("passed")
    failed_num = statuses.count("failed")
    skipped_num = collected_num - passed_num - failed_num
    return [
        ["collected", collected_num],
        ["passed", passed_num],
        ["failed", failed_num],
        ["skipped", skipped_num],
    ]
+
+
#Write the test result to a json file under the dir TMP_LOGS_DIR
def write_date_to_json(test_set_status,THIS_TEST,summary,case_status):
    """Dump the test set result to TMP_LOGS_DIR/<THIS_TEST>/report.json.

    Args:
        test_set_status: overall status string of the test set.
        THIS_TEST: test set name; also the sub-directory the file goes in.
        summary: [["collected",n],["passed",p],["failed",f],["skipped",s]]
            as produced by get_summary().
        case_status: dict of test_id -> status as produced by
            get_case_status().
    """
    #The data that will be written into the json file
    data = {
        'test_status': test_set_status,
        'test_name': THIS_TEST,
        'collected': summary[0][1],
        'passed': summary[1][1],
        'failed': summary[2][1],
        'skipped': summary[3][1],
        'case_status': case_status
    }

    #Write the "data" to the json file.  The "with" statement already
    #closes the file; the original explicit f.close() inside the block
    #was redundant and has been removed.
    report_json = TMP_LOGS_DIR + THIS_TEST + "/" + "report.json"
    with open(report_json, 'w') as f:
        json.dump(data, f, indent=4, sort_keys=False)
+
def get_report_html(THIS_TEST,test_set_status,summary,case_status):
    """Render a single test set's result as an HTML report string.

    Args:
        THIS_TEST: test set name, used in the title, headline and log link.
        test_set_status: overall status string of the test set.
        summary: [["collected",n],["passed",p],["failed",f],["skipped",s]].
        case_status: dict of test_id -> status; rendered one table row per
            case, in dict insertion order.

    Returns:
        The complete HTML document as one string (no newlines).
    """
    # Collect fragments in a list and join once at the end instead of
    # repeated string "+=" (which is quadratic in CPython-unspecified ways).
    parts = ["<html>"]

    #<head> </head>
    # Fix: the title was missing the space between the test name and
    # "test report", unlike the <h1> headline below.
    parts.append("<head>")
    parts.append("<title>")
    parts.append(THIS_TEST + " test report")
    parts.append("</title>")
    parts.append("</head>")

    #<body> </body>
    parts.append("<body>")
    parts.append("<h1>" + THIS_TEST + " test report" + "</h1>")
    parts.append("<p>" + "Status :" + test_set_status + "</p>")
    parts.append("<p>" + "Total: " + str(summary[0][1]))
    parts.append(" Pass: " + str(summary[1][1]))
    parts.append(" Fail: " + str(summary[2][1]))
    parts.append(" Skip: " + str(summary[3][1]) + "</p>")
    parts.append("<p>Details : </p>")

    #<table> </table>
    parts.append("<table border=\"1\" cellspacing=\"2\" >")
    parts.append("<tr bgcolor = \"2400B0\">")
    parts.append("<th><font color = \"white\">test case</font></th>")
    parts.append("<th><font color = \"white\">status</font></th>")
    parts.append("</tr>")

    #Add content to the table, alternating row background colors.
    for row_index, test_case in enumerate(case_status):
        bgcolor = "CCCBE4" if row_index % 2 == 0 else "E8E7F2"
        parts.append("<tr bgcolor = \"" + bgcolor + "\">")
        parts.append("<th>" + test_case + "</th>")
        parts.append("<th>" + case_status[test_case] + "</th>")
        parts.append("</tr>")

    parts.append("</table>")
    parts.append("<p></p>")
    parts.append("<font>Detail log :</font>")
    #TODO update the link address for log.zip
    parts.append("<a href=\"" + THIS_TEST + "/log.zip" + "\">log.zip</a>")
    parts.append("</body>")
    parts.append("</html>")

    return "".join(parts)
+
def write_to_html_file(THIS_TEST,html):
    """Write the rendered HTML report to
    TMP_LOGS_DIR/test-report/<THIS_TEST>/report.html.

    Args:
        THIS_TEST: test set name; selects the report sub-directory.
        html: the full HTML document string from get_report_html().
    """
    html_path = TMP_LOGS_DIR + "test-report/" + THIS_TEST + "/report.html"
    # Use a context manager so the file is closed even if write() raises,
    # instead of the bare open()/close() pair.
    with open(html_path, "w") as html_file:
        html_file.write(html)