diff options
author | 2022-07-28 10:34:43 +0800 | |
---|---|---|
committer | 2022-07-28 10:34:43 +0800 | |
commit | aa5fab53993f29311f1aef83488eb0f759dabca8 (patch) | |
tree | a8f561e714feaa48c577c24b062fef8fe9c9b2d3 /plugins/agl_test_log.py | |
parent | 76665693bf19bdbe159849b43cc42142d3093c2f (diff) |
agl-test-framework: demo code submissionneedlefish_13.93.0needlefish/13.93.013.93.0
Submit the demo code of agl-test-framework
The "agl-test framework" encapsulates pytest, and aims to provide a
unified entry point for test-set execution. It supports running various
test sets, even if those test sets come from different test frameworks,
processing their test logs uniformly and generating a complete test report.
In this way, it is convenient to test as many targets as possible,
so that the testing covers a wider range of objects
and is more comprehensive.
At present, we plan to support the following test sets in "agl-test":
1. Transplant test sets under Fuego and AGL-JTA
2. Retain the test set under pyagl and agl-ptest
(so will depend on "agl-ptest")
3. Migrate new test sets (with upstream)
4. Append new test sets (without upstream)
The output of test run is summarized by levels.
The first level is the summary of all test sets, and the second level is
the summary of a single test set. Now, they are displayed in HTML format,
and other formats also can be considered later.
Bug-AGL: SPEC-4345
Signed-off-by: duerpei <duep.fnst@fujitsu.com>
Change-Id: I25dfedcf8cdd373544c4fae677330defb5d21840
Diffstat (limited to 'plugins/agl_test_log.py')
-rw-r--r-- | plugins/agl_test_log.py | 65 |
1 files changed, 65 insertions, 0 deletions
import pytest  # NOTE(review): unused here; presumably needed for pytest plugin discovery — verify before removing
import re


def log_process_default(log):
    """Parse a default-format log and build the test result table.

    log: path to a log file in the default format:
         -> case_name: TEST-PASS
         -> case_name: TEST-FAIL
         -> case_name: TEST-SKIP

    Returns a list of rows [test_id, values, status]; the first row is a
    header, and the "status" column is left empty to be filled in later.
    """
    pattern = r'^ -> (.+?): (.+?)$'
    parse_result = log_parse(log, pattern)
    test_cases_values_and_status = [["test_id", "values", "status"]]

    if parse_result:
        for case_name, result in parse_result:
            test_cases_values_and_status.append([case_name, result, ""])

    return test_cases_values_and_status


def log_process_gnome_desktop_testing(log):
    """Parse a gnome-desktop-testing log and build the test result table.

    log: path to a log file in the gnome-desktop-testing format:
        PASS: glib/tls-database.test
        FAIL: glib/markup-escape.test
        SKIP: glib/testname.test

    Returns a list of rows [test_id, values, status]; the first row is a
    header, and the "status" column is left empty to be filled in later.
    """
    # Fix: the previous pattern '^(FAIL|PASS|SKIP.+?): (.+test?)' never
    # matched SKIP lines ('SKIP.+?' requires extra characters between
    # 'SKIP' and ': '), and 'test?' made the trailing 't' optional.
    # Anchor the three status keywords and the '.test' suffix explicitly;
    # captures for valid PASS/FAIL lines are unchanged.
    pattern = r'^(FAIL|PASS|SKIP): (.+\.test)$'
    parse_result = log_parse(log, pattern)
    test_cases_values_and_status = [["test_id", "values", "status"]]

    if parse_result:
        # groups() order is (status, test_id); the table stores test_id first.
        for status, test_id in parse_result:
            test_cases_values_and_status.append([test_id, status, ""])

    return test_cases_values_and_status


def log_parse(log, pattern):
    """Scan the file at *log* line by line and return a list of regex
    group tuples, one per line that matches *pattern*."""
    regex = re.compile(pattern, re.MULTILINE)

    parse_result = []
    # 'with' guarantees the file handle is closed even if a read raises
    # (the original open()/close() pair leaked it on exceptions).
    with open(log, 'r') as test_log:
        for line in test_log:
            matchs = regex.search(line)
            if matchs:
                parse_result.append(matchs.groups())
    return parse_result