author    takeshi_hoshina <takeshi_hoshina@mail.toyota.co.jp>  2020-11-02 11:07:33 +0900
committer takeshi_hoshina <takeshi_hoshina@mail.toyota.co.jp>  2020-11-02 11:07:33 +0900
commit    1c7d6584a7811b7785ae5c1e378f14b5ba0971cf (patch)
tree      cd70a267a5ef105ba32f200aa088e281fbd85747 /external/poky/scripts/lib/resulttool
parent    4204309872da5cb401cbb2729d9e2d4869a87f42 (diff)
recipes
Diffstat (limited to 'external/poky/scripts/lib/resulttool')
-rw-r--r--  external/poky/scripts/lib/resulttool/log.py                              |  84
-rwxr-xr-x  external/poky/scripts/lib/resulttool/manualexecution.py                  |  41
-rw-r--r--  external/poky/scripts/lib/resulttool/merge.py                            |  30
-rw-r--r--  external/poky/scripts/lib/resulttool/regression.py                       |  10
-rw-r--r--  external/poky/scripts/lib/resulttool/report.py                           | 234
-rw-r--r--  external/poky/scripts/lib/resulttool/resultutils.py                      |  85
-rw-r--r--  external/poky/scripts/lib/resulttool/store.py                            |  24
-rw-r--r--  external/poky/scripts/lib/resulttool/template/test_report_full_text.txt  |  47
8 files changed, 420 insertions(+), 135 deletions(-)
diff --git a/external/poky/scripts/lib/resulttool/log.py b/external/poky/scripts/lib/resulttool/log.py
index 49816357..eb3927ec 100644
--- a/external/poky/scripts/lib/resulttool/log.py
+++ b/external/poky/scripts/lib/resulttool/log.py
@@ -2,27 +2,29 @@
#
# Copyright (c) 2019 Garmin International
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
import os
import resulttool.resultutils as resultutils
def show_ptest(result, ptest, logger):
- if 'ptestresult.sections' in result:
- if ptest in result['ptestresult.sections'] and 'log' in result['ptestresult.sections'][ptest]:
- print(result['ptestresult.sections'][ptest]['log'])
- return 0
+ logdata = resultutils.ptestresult_get_log(result, ptest)
+ if logdata is not None:
+ print(logdata)
+ return 0
- print("ptest '%s' not found" % ptest)
+ print("ptest '%s' log not found" % ptest)
return 1
+def show_reproducible(result, reproducible, logger):
+ try:
+ print(result['reproducible'][reproducible]['diffoscope.text'])
+ return 0
+
+ except KeyError:
+ print("reproducible '%s' not found" % reproducible)
+ return 1
+
def log(args, logger):
results = resultutils.load_resultsdata(args.source)
@@ -33,31 +35,49 @@ def log(args, logger):
for _, run_name, _, r in resultutils.test_run_results(results):
if args.dump_ptest:
- if 'ptestresult.sections' in r:
- for name, ptest in r['ptestresult.sections'].items():
- if 'log' in ptest:
- dest_dir = args.dump_ptest
- if args.prepend_run:
- dest_dir = os.path.join(dest_dir, run_name)
+ for sectname in ['ptestresult.sections', 'ltpposixresult.sections', 'ltpresult.sections']:
+ if sectname in r:
+ for name, ptest in r[sectname].items():
+ logdata = resultutils.generic_get_log(sectname, r, name)
+ if logdata is not None:
+ dest_dir = args.dump_ptest
+ if args.prepend_run:
+ dest_dir = os.path.join(dest_dir, run_name)
+ if not sectname.startswith("ptest"):
+ dest_dir = os.path.join(dest_dir, sectname.split(".")[0])
- os.makedirs(dest_dir, exist_ok=True)
+ os.makedirs(dest_dir, exist_ok=True)
+ dest = os.path.join(dest_dir, '%s.log' % name)
+ print(dest)
+ with open(dest, 'w') as f:
+ f.write(logdata)
- dest = os.path.join(dest_dir, '%s.log' % name)
- print(dest)
- with open(dest, 'w') as f:
- f.write(ptest['log'])
+ if args.raw_ptest:
+ found = False
+ for sectname in ['ptestresult.rawlogs', 'ltpposixresult.rawlogs', 'ltpresult.rawlogs']:
+ rawlog = resultutils.generic_get_rawlogs(sectname, r)
+ if rawlog is not None:
+ print(rawlog)
+ found = True
+ if not found:
+ print('Raw ptest logs not found')
+ return 1
- if args.raw:
- if 'ptestresult.rawlogs' in r:
- print(r['ptestresult.rawlogs']['log'])
+ if args.raw_reproducible:
+ if 'reproducible.rawlogs' in r:
+ print(r['reproducible.rawlogs']['log'])
else:
- print('Raw logs not found')
+ print('Raw reproducible logs not found')
return 1
for ptest in args.ptest:
if not show_ptest(r, ptest, logger):
return 1
+ for reproducible in args.reproducible:
+ if not show_reproducible(r, reproducible, logger):
+ return 1
+
def register_commands(subparsers):
"""Register subcommands from this plugin"""
parser = subparsers.add_parser('log', help='show logs',
@@ -70,9 +90,15 @@ def register_commands(subparsers):
help='show logs for a ptest')
parser.add_argument('--dump-ptest', metavar='DIR',
help='Dump all ptest log files to the specified directory.')
+ parser.add_argument('--reproducible', action='append', default=[],
+ help='show logs for a reproducible test')
parser.add_argument('--prepend-run', action='store_true',
help='''Dump ptest results to a subdirectory named after the test run when using --dump-ptest.
Required if more than one test run is present in the result file''')
parser.add_argument('--raw', action='store_true',
- help='show raw logs')
+ help='show raw (ptest) logs. Deprecated. Alias for "--raw-ptest"', dest='raw_ptest')
+ parser.add_argument('--raw-ptest', action='store_true',
+ help='show raw ptest log')
+ parser.add_argument('--raw-reproducible', action='store_true',
+ help='show raw reproducible build logs')
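
The reworked dump loop walks three section families ('ptestresult', 'ltpposixresult', 'ltpresult') and nests the logs of the non-ptest families one directory deeper. A minimal sketch of the file layout produced by --dump-ptest DIR --prepend-run, assuming a run named 'run0' with a hypothetical ptest section 'glibc' and ltpresult section 'math':

    # Mirrors the dest_dir logic in the hunk above; section names are made up.
    import os

    dest_dir = os.path.join('DIR', 'run0')
    for sectname, name in [('ptestresult.sections', 'glibc'),
                           ('ltpresult.sections', 'math')]:
        d = dest_dir
        if not sectname.startswith('ptest'):
            d = os.path.join(d, sectname.split('.')[0])
        print(os.path.join(d, '%s.log' % name))
    # prints: DIR/run0/glibc.log
    #         DIR/run0/ltpresult/math.log
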
diff --git a/external/poky/scripts/lib/resulttool/manualexecution.py b/external/poky/scripts/lib/resulttool/manualexecution.py
index dc368f36..ecb27c59 100755
--- a/external/poky/scripts/lib/resulttool/manualexecution.py
+++ b/external/poky/scripts/lib/resulttool/manualexecution.py
@@ -2,15 +2,9 @@
#
# Copyright (c) 2018, Intel Corporation.
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import argparse
import json
import os
@@ -187,11 +181,38 @@ class ManualTestRunner(object):
write_json_file(config_options_file, config_options)
logger.info('Configuration option file created at %s' % config_options_file)
+ def make_testcase_config_file(self, logger, case_file, testcase_config_file):
+ if testcase_config_file:
+ if os.path.exists(testcase_config_file):
+ print('\nTest configuration file with name %s already exists. Please provide a unique file name' % (testcase_config_file))
+ return 0
+
+ if not testcase_config_file:
+ testcase_config_file = os.path.join(self._get_write_dir(), "testconfig_new.json")
+
+ testcase_config = {}
+ cases = load_json_file(case_file)
+ new_test_module = self._get_test_module(case_file)
+ new_testcase_config = {}
+ new_testcase_config['testcases'] = []
+
+ print('\nAdd testcases for this configuration file:')
+ for case in cases:
+ print('\n' + case['test']['@alias'])
+ add_tc_config = self._get_true_false_input('\nDo you want to add this test case to test configuration : (Y)es/(N)o\n')
+ if add_tc_config:
+ new_testcase_config['testcases'].append(case['test']['@alias'])
+ write_json_file(testcase_config_file, new_testcase_config)
+ logger.info('Testcase Configuration file created at %s' % testcase_config_file)
+
def manualexecution(args, logger):
testrunner = ManualTestRunner()
if args.make_config_options_file:
testrunner.make_config_option_file(logger, args.file, args.config_options_file)
return 0
+ if args.make_testcase_config_file:
+ testrunner.make_testcase_config_file(logger, args.file, args.testcase_config_file)
+ return 0
configurations, result_id, write_dir, test_results = testrunner.run_test(args.file, args.config_options_file, args.testcase_config_file)
resultjsonhelper = OETestResultJSONHelper()
resultjsonhelper.dump_testresult_file(write_dir, configurations, result_id, test_results)
@@ -209,4 +230,6 @@ def register_commands(subparsers):
parser_build.add_argument('-m', '--make-config-options-file', action='store_true',
help='make the configuration options file based on provided inputs')
parser_build.add_argument('-t', '--testcase-config-file', default='',
- help='the testcase configuration file to enable user to run a selected set of test case')
\ No newline at end of file
+ help='the testcase configuration file to enable user to run a selected set of test case or make a testcase configuration file')
+ parser_build.add_argument('-d', '--make-testcase-config-file', action='store_true',
+ help='make the testcase configuration file to run a set of test cases based on user selection')
\ No newline at end of file
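
The new -d/--make-testcase-config-file flow prompts once per test case and saves the accepted '@alias' values under a single 'testcases' key. A sketch of the JSON it writes, with hypothetical aliases:

    import json

    # Layout matches the new_testcase_config dict built above; aliases are made up.
    new_testcase_config = {
        "testcases": [
            "manual.bsp-qemu.qemu_boot",
            "manual.crops.crops_smoke",
        ]
    }
    with open("testconfig_new.json", "w") as f:
        json.dump(new_testcase_config, f, indent=4)
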
diff --git a/external/poky/scripts/lib/resulttool/merge.py b/external/poky/scripts/lib/resulttool/merge.py
index 7159463f..18b4825a 100644
--- a/external/poky/scripts/lib/resulttool/merge.py
+++ b/external/poky/scripts/lib/resulttool/merge.py
@@ -3,30 +3,31 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
import json
import resulttool.resultutils as resultutils
def merge(args, logger):
+ configvars = {}
+ if not args.not_add_testseries:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
if resultutils.is_url(args.target_results) or os.path.isdir(args.target_results):
- results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
- resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map, configvars=configvars)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map, configvars=configvars)
resultutils.save_resultsdata(results, args.target_results)
else:
- results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map, configvars=configvars)
if os.path.exists(args.target_results):
- resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map, configvars=configvars)
resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+ logger.info('Merged results to %s' % os.path.dirname(args.target_results))
+
return 0
def register_commands(subparsers):
@@ -39,4 +40,7 @@ def register_commands(subparsers):
help='the results file/directory/URL to import')
parser_build.add_argument('target_results',
help='the target file or directory to merge the base_results with')
-
+ parser_build.add_argument('-t', '--not-add-testseries', action='store_true',
+ help='do not add testseries configuration to results')
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
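
Both new options act through a configvars dict that resultutils uses to backfill missing keys in each result's 'configuration' section; values already present are never overwritten. A minimal sketch of that flow, with a hypothetical --executed-by value:

    # configvars as merge() builds it; 'autobuilder' is a made-up value.
    configvars = {'TESTSERIES': ''}            # resultutils.extra_configvars.copy()
    configvars['EXECUTED_BY'] = 'autobuilder'  # from --executed-by

    configuration = {'TEST_TYPE': 'runtime'}   # hypothetical result configuration
    for key, value in configvars.items():
        configuration.setdefault(key, value)
    # An empty TESTSERIES is later replaced by the results directory name
    # inside append_resultsdata(); passing -t skips adding it entirely.
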
diff --git a/external/poky/scripts/lib/resulttool/regression.py b/external/poky/scripts/lib/resulttool/regression.py
index fa90ab1e..9f952951 100644
--- a/external/poky/scripts/lib/resulttool/regression.py
+++ b/external/poky/scripts/lib/resulttool/regression.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import resulttool.resultutils as resultutils
import json
diff --git a/external/poky/scripts/lib/resulttool/report.py b/external/poky/scripts/lib/resulttool/report.py
index 8ae42728..f0ca50eb 100644
--- a/external/poky/scripts/lib/resulttool/report.py
+++ b/external/poky/scripts/lib/resulttool/report.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
import glob
import json
@@ -23,26 +17,37 @@ import oeqa.utils.gitarchive as gitarchive
class ResultsTextReport(object):
def __init__(self):
self.ptests = {}
- self.result_types = {'passed': ['PASSED', 'passed'],
- 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
- 'skipped': ['SKIPPED', 'skipped']}
+ self.ltptests = {}
+ self.ltpposixtests = {}
+ self.result_types = {'passed': ['PASSED', 'passed', 'PASS', 'XFAIL'],
+ 'failed': ['FAILED', 'failed', 'FAIL', 'ERROR', 'error', 'UNKNOWN', 'XPASS'],
+ 'skipped': ['SKIPPED', 'skipped', 'UNSUPPORTED', 'UNTESTED', 'UNRESOLVED']}
+
+ def handle_ptest_result(self, k, status, result, machine):
+ if machine not in self.ptests:
+ self.ptests[machine] = {}
- def handle_ptest_result(self, k, status, result):
if k == 'ptestresult.sections':
# Ensure tests without any test results still show up on the report
for suite in result['ptestresult.sections']:
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
if 'duration' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ self.ptests[machine][suite]['duration'] = result['ptestresult.sections'][suite]['duration']
if 'timeout' in result['ptestresult.sections'][suite]:
- self.ptests[suite]['duration'] += " T"
- return
+ self.ptests[machine][suite]['duration'] += " T"
+ return True
+
+ # process test result
try:
_, suite, test = k.split(".", 2)
except ValueError:
- return
+ return True
+
# Handle 'glib-2.0'
if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
try:
@@ -51,24 +56,105 @@ class ResultsTextReport(object):
suite = suite + "." + suite1
except ValueError:
pass
- if suite not in self.ptests:
- self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+
+ if suite not in self.ptests[machine]:
+ self.ptests[machine][suite] = {
+ 'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-',
+ 'failed_testcases': [], "testcases": set(),
+ }
+
+ # do not process duplicate results
+ if test in self.ptests[machine][suite]["testcases"]:
+ print("Warning duplicate ptest result '{}.{}' for {}".format(suite, test, machine))
+ return False
+
for tk in self.result_types:
if status in self.result_types[tk]:
- self.ptests[suite][tk] += 1
+ self.ptests[machine][suite][tk] += 1
+ self.ptests[machine][suite]["testcases"].add(test)
+ return True
+
+ def handle_ltptest_result(self, k, status, result, machine):
+ if machine not in self.ltptests:
+ self.ltptests[machine] = {}
- def get_aggregated_test_result(self, logger, testresult):
+ if k == 'ltpresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpresult.sections']:
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] = result['ltpresult.sections'][suite]['duration']
+ if 'timeout' in result['ltpresult.sections'][suite]:
+ self.ltptests[machine][suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle 'glib-2.0'
+ if 'ltpresult.sections' in result and suite not in result['ltpresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltptests[machine]:
+ self.ltptests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltptests[machine][suite][tk] += 1
+
+ def handle_ltpposixtest_result(self, k, status, result, machine):
+ if machine not in self.ltpposixtests:
+ self.ltpposixtests[machine] = {}
+
+ if k == 'ltpposixresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ltpposixresult.sections']:
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ltpposixresult.sections'][suite]:
+ self.ltpposixtests[machine][suite]['duration'] = result['ltpposixresult.sections'][suite]['duration']
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle 'glib-2.0'
+ if 'ltpposixresult.sections' in result and suite not in result['ltpposixresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ltpposixresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ltpposixtests[machine]:
+ self.ltpposixtests[machine][suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ltpposixtests[machine][suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult, machine):
test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
result = testresult.get('result', [])
for k in result:
test_status = result[k].get('status', [])
+ if k.startswith("ptestresult."):
+ if not self.handle_ptest_result(k, test_status, result, machine):
+ continue
+ elif k.startswith("ltpresult."):
+ self.handle_ltptest_result(k, test_status, result, machine)
+ elif k.startswith("ltpposixresult."):
+ self.handle_ltpposixtest_result(k, test_status, result, machine)
+
+ # process result if it was not skipped by a handler
for tk in self.result_types:
if test_status in self.result_types[tk]:
test_count_report[tk] += 1
if test_status in self.result_types['failed']:
test_count_report['failed_testcases'].append(k)
- if k.startswith("ptestresult."):
- self.handle_ptest_result(k, test_status, result)
return test_count_report
def print_test_report(self, template_file_name, test_count_reports):
@@ -78,10 +164,10 @@ class ResultsTextReport(object):
env = Environment(loader=file_loader, trim_blocks=True)
template = env.get_template(template_file_name)
havefailed = False
- haveptest = bool(self.ptests)
reportvalues = []
+ machines = []
cols = ['passed', 'failed', 'skipped']
- maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+ maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 ,'ltptest': 0, 'ltpposixtest': 0}
for line in test_count_reports:
total_tested = line['passed'] + line['failed'] + line['skipped']
vals = {}
@@ -97,18 +183,53 @@ class ResultsTextReport(object):
reportvalues.append(vals)
if line['failed_testcases']:
havefailed = True
- for ptest in self.ptests:
- if len(ptest) > maxlen['ptest']:
- maxlen['ptest'] = len(ptest)
+ if line['machine'] not in machines:
+ machines.append(line['machine'])
+ reporttotalvalues = {}
+ for k in cols:
+ reporttotalvalues[k] = '%s' % sum([line[k] for line in test_count_reports])
+ reporttotalvalues['count'] = '%s' % len(test_count_reports)
+ for (machine, report) in self.ptests.items():
+ for ptest in self.ptests[machine]:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ for (machine, report) in self.ltptests.items():
+ for ltptest in self.ltptests[machine]:
+ if len(ltptest) > maxlen['ltptest']:
+ maxlen['ltptest'] = len(ltptest)
+ for (machine, report) in self.ltpposixtests.items():
+ for ltpposixtest in self.ltpposixtests[machine]:
+ if len(ltpposixtest) > maxlen['ltpposixtest']:
+ maxlen['ltpposixtest'] = len(ltpposixtest)
output = template.render(reportvalues=reportvalues,
+ reporttotalvalues=reporttotalvalues,
havefailed=havefailed,
- haveptest=haveptest,
+ machines=machines,
ptests=self.ptests,
+ ltptests=self.ltptests,
+ ltpposixtests=self.ltpposixtests,
maxlen=maxlen)
print(output)
- def view_test_report(self, logger, source_dir, branch, commit, tag):
+ def view_test_report(self, logger, source_dir, branch, commit, tag, use_regression_map, raw_test, selected_test_case_only):
+ def print_selected_testcase_result(testresults, selected_test_case_only):
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ result = testresults[testsuite][resultid]['result']
+ test_case_result = result.get(selected_test_case_only, {})
+ if test_case_result.get('status'):
+ print('Found selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ print(test_case_result['status'])
+ else:
+ print('Could not find selected test case result for %s from %s' % (selected_test_case_only,
+ resultid))
+ if test_case_result.get('log'):
+ print(test_case_result['log'])
test_count_reports = []
+ configmap = resultutils.store_map
+ if use_regression_map:
+ configmap = resultutils.regression_map
if commit:
if tag:
logger.warning("Ignoring --tag as --commit was specified")
@@ -116,16 +237,48 @@ class ResultsTextReport(object):
repo = GitRepo(source_dir)
revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
rev_index = gitarchive.rev_find(revs, 'commit', commit)
- testresults = resultutils.git_get_result(repo, revs[rev_index][2])
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2], configmap=configmap)
elif tag:
repo = GitRepo(source_dir)
- testresults = resultutils.git_get_result(repo, [tag])
+ testresults = resultutils.git_get_result(repo, [tag], configmap=configmap)
else:
- testresults = resultutils.load_resultsdata(source_dir)
+ testresults = resultutils.load_resultsdata(source_dir, configmap=configmap)
+ if raw_test:
+ raw_results = {}
+ for testsuite in testresults:
+ result = testresults[testsuite].get(raw_test, {})
+ if result:
+ raw_results[testsuite] = {raw_test: result}
+ if raw_results:
+ if selected_test_case_only:
+ print_selected_testcase_result(raw_results, selected_test_case_only)
+ else:
+ print(json.dumps(raw_results, sort_keys=True, indent=4))
+ else:
+ print('Could not find raw test result for %s' % raw_test)
+ return 0
+ if selected_test_case_only:
+ print_selected_testcase_result(testresults, selected_test_case_only)
+ return 0
for testsuite in testresults:
for resultid in testresults[testsuite]:
+ skip = False
result = testresults[testsuite][resultid]
- test_count_report = self.get_aggregated_test_result(logger, result)
+ machine = result['configuration']['MACHINE']
+
+ # Check to see if there is already results for these kinds of tests for the machine
+ for key in result['result'].keys():
+ testtype = str(key).split('.')[0]
+ if ((machine in self.ltptests and testtype == "ltpresult" and self.ltptests[machine]) or
+ (machine in self.ltpposixtests and testtype == "ltpposixresult" and self.ltpposixtests[machine])):
+ print("Already have test results for %s on %s, skipping %s" %(str(key).split('.')[0], machine, resultid))
+ skip = True
+ break
+ if skip:
+ break
+
+ test_count_report = self.get_aggregated_test_result(logger, result, machine)
+ test_count_report['machine'] = machine
test_count_report['testseries'] = result['configuration']['TESTSERIES']
test_count_report['result_id'] = resultid
test_count_reports.append(test_count_report)
@@ -133,7 +286,8 @@ class ResultsTextReport(object):
def report(args, logger):
report = ResultsTextReport()
- report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag, args.use_regression_map,
+ args.raw_test_only, args.selected_test_case_only)
return 0
def register_commands(subparsers):
@@ -148,3 +302,11 @@ def register_commands(subparsers):
parser_build.add_argument('--commit', help="Revision to report")
parser_build.add_argument('-t', '--tag', default='',
help='source_dir is a git repository, report on the tag specified from that repository')
+ parser_build.add_argument('-m', '--use_regression_map', action='store_true',
+ help='instead of the default "store_map", use the "regression_map" for report')
+ parser_build.add_argument('-r', '--raw_test_only', default='',
+ help='output raw test result only for the user provided test result id')
+ parser_build.add_argument('-s', '--selected_test_case_only', default='',
+ help='output selected test case result for the user provided test case id, if both test '
+ 'result id and test case id are provided then output the selected test case result '
+ 'from the provided test result id')
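
Keying the counters by machine lets a single report aggregate results from several machines without mixing their suites, and the new per-suite "testcases" set is what makes the duplicate detection above possible. A sketch of the structure handle_ptest_result() maintains, with hypothetical machine and suite names:

    # Possible contents of self.ptests after one run on each of two machines;
    # the per-suite layout mirrors the initializer in handle_ptest_result().
    ptests = {
        'qemux86-64': {
            'glibc': {'passed': 10, 'failed': 1, 'skipped': 0,
                      'duration': '42 T',   # " T" marks a timed-out suite
                      'failed_testcases': [],
                      'testcases': {'tst-foo', 'tst-bar'}},
        },
        'qemuarm': {},
    }
    # A test already in the 'testcases' set makes handle_ptest_result()
    # return False, and get_aggregated_test_result() skips counting it again.
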
diff --git a/external/poky/scripts/lib/resulttool/resultutils.py b/external/poky/scripts/lib/resulttool/resultutils.py
index 07dab4cb..8917022d 100644
--- a/external/poky/scripts/lib/resulttool/resultutils.py
+++ b/external/poky/scripts/lib/resulttool/resultutils.py
@@ -3,16 +3,12 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import os
+import base64
+import zlib
import json
import scriptpath
import copy
@@ -48,10 +44,12 @@ def is_url(p):
"""
return p.startswith('http://') or p.startswith('https://')
+extra_configvars = {'TESTSERIES': ''}
+
#
# Load the json file and append the results data into the provided results dict
#
-def append_resultsdata(results, f, configmap=store_map):
+def append_resultsdata(results, f, configmap=store_map, configvars=extra_configvars):
if type(f) is str:
if is_url(f):
with urllib.request.urlopen(f) as response:
@@ -67,12 +65,15 @@ def append_resultsdata(results, f, configmap=store_map):
for res in data:
if "configuration" not in data[res] or "result" not in data[res]:
raise ValueError("Test results data without configuration or result section?")
- if "TESTSERIES" not in data[res]["configuration"]:
- data[res]["configuration"]["TESTSERIES"] = testseries
+ for config in configvars:
+ if config == "TESTSERIES" and "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = testseries
+ continue
+ if config not in data[res]["configuration"]:
+ data[res]["configuration"][config] = configvars[config]
testtype = data[res]["configuration"].get("TEST_TYPE")
if testtype not in configmap:
raise ValueError("Unknown test type %s" % testtype)
- configvars = configmap[testtype]
testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
if testpath not in results:
results[testpath] = {}
@@ -82,16 +83,16 @@ def append_resultsdata(results, f, configmap=store_map):
# Walk a directory and find/load results data
# or load directly from a file
#
-def load_resultsdata(source, configmap=store_map):
+def load_resultsdata(source, configmap=store_map, configvars=extra_configvars):
results = {}
if is_url(source) or os.path.isfile(source):
- append_resultsdata(results, source, configmap)
+ append_resultsdata(results, source, configmap, configvars)
return results
for root, dirs, files in os.walk(source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- append_resultsdata(results, f, configmap)
+ append_resultsdata(results, f, configmap, configvars)
return results
def filter_resultsdata(results, resultid):
@@ -118,6 +119,41 @@ def strip_ptestresults(results):
del newresults[res]['result']['ptestresult.sections'][i]['log']
return newresults
+def decode_log(logdata):
+ if isinstance(logdata, str):
+ return logdata
+ elif isinstance(logdata, dict):
+ if "compressed" in logdata:
+ data = logdata.get("compressed")
+ data = base64.b64decode(data.encode("utf-8"))
+ data = zlib.decompress(data)
+ return data.decode("utf-8", errors='ignore')
+ return None
+
+def generic_get_log(sectionname, results, section):
+ if sectionname not in results:
+ return None
+ if section not in results[sectionname]:
+ return None
+
+ ptest = results[sectionname][section]
+ if 'log' not in ptest:
+ return None
+ return decode_log(ptest['log'])
+
+def ptestresult_get_log(results, section):
+ return generic_get_log('ptestresult.sections', results, section)
+
+def generic_get_rawlogs(sectname, results):
+ if sectname not in results:
+ return None
+ if 'log' not in results[sectname]:
+ return None
+ return decode_log(results[sectname]['log'])
+
+def ptestresult_get_rawlogs(results):
+ return generic_get_rawlogs('ptestresult.rawlogs', results)
+
def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
for res in results:
if res:
@@ -132,16 +168,19 @@ def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
f.write(json.dumps(resultsout, sort_keys=True, indent=4))
for res2 in results[res]:
if ptestlogs and 'result' in results[res][res2]:
- if 'ptestresult.rawlogs' in results[res][res2]['result']:
+ seriesresults = results[res][res2]['result']
+ rawlogs = ptestresult_get_rawlogs(seriesresults)
+ if rawlogs is not None:
with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
- f.write(results[res][res2]['result']['ptestresult.rawlogs']['log'])
- if 'ptestresult.sections' in results[res][res2]['result']:
- for i in results[res][res2]['result']['ptestresult.sections']:
- if 'log' in results[res][res2]['result']['ptestresult.sections'][i]:
+ f.write(rawlogs)
+ if 'ptestresult.sections' in seriesresults:
+ for i in seriesresults['ptestresult.sections']:
+ sectionlog = ptestresult_get_log(seriesresults, i)
+ if sectionlog is not None:
with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
- f.write(results[res][res2]['result']['ptestresult.sections'][i]['log'])
+ f.write(sectionlog)
-def git_get_result(repo, tags):
+def git_get_result(repo, tags, configmap=store_map):
git_objs = []
for tag in tags:
files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
@@ -164,7 +203,7 @@ def git_get_result(repo, tags):
# Optimize by reading all data with one git command
results = {}
for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
- append_resultsdata(results, obj)
+ append_resultsdata(results, obj, configmap=configmap)
return results
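
decode_log() accepts either a plain string or a dict whose 'compressed' member holds base64-encoded zlib data. The matching encoder is not part of this patch, but it can be inferred from the decode path; a round-trip sketch under that assumption:

    import base64
    import zlib

    def encode_log(text):
        # Inferred inverse of decode_log(): zlib-compress, then base64 so the
        # bytes can be embedded in JSON.
        data = zlib.compress(text.encode("utf-8"))
        return {"compressed": base64.b64encode(data).decode("utf-8")}

    def decode_log(logdata):
        # Same logic as the patch above.
        if isinstance(logdata, str):
            return logdata
        elif isinstance(logdata, dict):
            if "compressed" in logdata:
                data = base64.b64decode(logdata["compressed"].encode("utf-8"))
                return zlib.decompress(data).decode("utf-8", errors="ignore")
        return None

    assert decode_log(encode_log("PASS: tst-foo\n")) == "PASS: tst-foo\n"
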
diff --git a/external/poky/scripts/lib/resulttool/store.py b/external/poky/scripts/lib/resulttool/store.py
index acdfbd94..e0951f0a 100644
--- a/external/poky/scripts/lib/resulttool/store.py
+++ b/external/poky/scripts/lib/resulttool/store.py
@@ -3,15 +3,9 @@
# Copyright (c) 2019, Intel Corporation.
# Copyright (c) 2019, Linux Foundation
#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
-# more details.
+# SPDX-License-Identifier: GPL-2.0-only
#
+
import tempfile
import os
import subprocess
@@ -27,16 +21,21 @@ import oeqa.utils.gitarchive as gitarchive
def store(args, logger):
tempdir = tempfile.mkdtemp(prefix='testresults.')
try:
+ configvars = resultutils.extra_configvars.copy()
+ if args.executed_by:
+ configvars['EXECUTED_BY'] = args.executed_by
+ if args.extra_test_env:
+ configvars['EXTRA_TEST_ENV'] = args.extra_test_env
results = {}
logger.info('Reading files from %s' % args.source)
if resultutils.is_url(args.source) or os.path.isfile(args.source):
- resultutils.append_resultsdata(results, args.source)
+ resultutils.append_resultsdata(results, args.source, configvars=configvars)
else:
for root, dirs, files in os.walk(args.source):
for name in files:
f = os.path.join(root, name)
if name == "testresults.json":
- resultutils.append_resultsdata(results, f)
+ resultutils.append_resultsdata(results, f, configvars=configvars)
elif args.all:
dst = f.replace(args.source, tempdir + "/")
os.makedirs(os.path.dirname(dst), exist_ok=True)
@@ -99,4 +98,7 @@ def register_commands(subparsers):
help='include all files, not just testresults.json files')
parser_build.add_argument('-e', '--allow-empty', action='store_true',
help='don\'t error if no results to store are found')
-
+ parser_build.add_argument('-x', '--executed-by', default='',
+ help='add executed-by configuration to each result file')
+ parser_build.add_argument('-t', '--extra-test-env', default='',
+ help='add extra test environment data to each result file configuration')
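
As with merge, the new flags stamp provenance into every stored result through configvars. A hypothetical 'configuration' section of a testresults.json stored with both flags set (all values made up):

    # hypothetical invocation: resulttool store -x autobuilder -t "LTP=1" ...
    configuration = {
        "TEST_TYPE": "runtime",
        "MACHINE": "qemux86-64",
        "TESTSERIES": "qemux86-64",     # backfilled from the directory name
        "EXECUTED_BY": "autobuilder",   # from -x/--executed-by
        "EXTRA_TEST_ENV": "LTP=1",      # from -t/--extra-test-env
    }
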
diff --git a/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt
index 590f35c7..2efba2ef 100644
--- a/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/external/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -8,22 +8,57 @@ Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
+{{ 'Total'.ljust(maxlen['testseries']) }} | {{ reporttotalvalues['count'].ljust(maxlen['result_id']) }} | {{ reporttotalvalues['passed'].ljust(maxlen['passed']) }} | {{ reporttotalvalues['failed'].ljust(maxlen['failed']) }} | {{ reporttotalvalues['skipped'].ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
-{% if haveptest %}
+{% for machine in machines %}
+{% if ptests[machine] %}
==============================================================================================================
-PTest Result Summary
+{{ machine }} PTest Result Summary
==============================================================================================================
--------------------------------------------------------------------------------------------------------------
{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
--------------------------------------------------------------------------------------------------------------
-{% for ptest in ptests |sort %}
-{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% for ptest in ptests[machine] |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[machine][ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[machine][ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[machine][ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[machine][ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltptests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Test Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltptest in ltptests[machine] |sort %}
+{{ ltptest.ljust(maxlen['ltptest']) }} | {{ (ltptests[machine][ltptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltptests[machine][ltptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltptests[machine][ltptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltptests[machine][ltptest]['duration']|string) }}
{% endfor %}
--------------------------------------------------------------------------------------------------------------
-{% else %}
-There was no ptest data
{% endif %}
+{% endfor %}
+
+{% for machine in machines %}
+{% if ltpposixtests[machine] %}
+==============================================================================================================
+{{ machine }} Ltp Posix Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ltpposixtest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ltpposixtest in ltpposixtests[machine] |sort %}
+{{ ltpposixtest.ljust(maxlen['ltpposixtest']) }} | {{ (ltpposixtests[machine][ltpposixtest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ltpposixtests[machine][ltpposixtest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ltpposixtests[machine][ltpposixtest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% endif %}
+{% endfor %}
+
==============================================================================================================
Failed test cases (sorted by testseries, ID)