poky: refresh thud: e4c0a8a7cb..9dfebdaf7a
Update poky to thud HEAD.
Mazliana (2):
scripts/resulttool: enable manual execution and result creation
resulttool/manualexecution: To output right test case id
Michael Halstead (1):
yocto-uninative: Correct sha256sum for aarch64
Richard Purdie (12):
resulttool: Improvements to allow integration to the autobuilder
resulttool/resultutils: Avoids tracebacks for missing logs
resulttool/store: Handle results files for multiple revisions
resulttool/report: Handle missing metadata sections more cleanly
resulttool/report: Ensure test suites with no results show up on the report
resulttool/report: Ensure ptest results are sorted
resulttool/store: Fix missing variable causing testresult corruption
oeqa/utils/gitarchive: Handle case where parent is only on origin
scripts/wic: Be consistent about how we call bitbake
yocto-uninative: Update to 2.4
poky.conf: Bump version for 2.6.2 thud release
build-appliance-image: Update to thud head revision
Yeoh Ee Peng (4):
resulttool: enable merge, store, report and regression analysis
resulttool/regression: Ensure regressoin results are sorted
scripts/resulttool: Enable manual result store and regression
resulttool/report: Enable roll-up report for a commit
Change-Id: Icf3c93db794539bdd4501d2e7db15c68b6c541ae
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/scripts/lib/resulttool/__init__.py b/poky/scripts/lib/resulttool/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/poky/scripts/lib/resulttool/__init__.py
diff --git a/poky/scripts/lib/resulttool/manualexecution.py b/poky/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000..6487cd9
--- /dev/null
+++ b/poky/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,142 @@
+# test case management tool - manual execution from testopia test cases
+#
+# Copyright (c) 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import argparse
+import json
+import os
+import sys
+import datetime
+import re
+from oeqa.core.runner import OETestResultJSONHelper
+
+
+def load_json_file(file):
+ with open(file, "r") as f:
+ return json.load(f)
+
+
+class ManualTestRunner(object):
+ def __init__(self):
+ self.jdata = ''
+ self.test_module = ''
+ self.test_cases_id = ''
+ self.configuration = ''
+ self.starttime = ''
+ self.result_id = ''
+ self.write_dir = ''
+
+ def _get_testcases(self, file):
+ self.jdata = load_json_file(file)
+ self.test_cases_id = []
+ self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
+ for i in self.jdata:
+ self.test_cases_id.append(i['test']['@alias'])
+
+ def _get_input(self, config):
+ while True:
+ output = input('{} = '.format(config))
+ if re.match('^[a-zA-Z0-9_-]+$', output):
+ break
+ print('Only alphanumeric and underscore/hyphen are allowed. Please try again')
+ return output
+
+ def _create_config(self):
+ from oeqa.utils.metadata import get_layers
+ from oeqa.utils.commands import get_bb_var
+ from resulttool.resultutils import store_map
+
+ layers = get_layers(get_bb_var('BBLAYERS'))
+ self.configuration = {}
+ self.configuration['LAYERS'] = layers
+ current_datetime = datetime.datetime.now()
+ self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
+ self.configuration['STARTTIME'] = self.starttime
+ self.configuration['TEST_TYPE'] = 'manual'
+ self.configuration['TEST_MODULE'] = self.test_module
+
+ extra_config = set(store_map['manual']) - set(self.configuration)
+ for config in sorted(extra_config):
+ print('---------------------------------------------')
+ print('This is configuration #%s. Please provide a configuration value (use "None" if not applicable).'
+ % config)
+ print('---------------------------------------------')
+ value_conf = self._get_input('Configuration Value')
+ print('---------------------------------------------\n')
+ self.configuration[config] = value_conf
+
+ def _create_result_id(self):
+ self.result_id = 'manual_' + self.test_module + '_' + self.starttime
+
+ def _execute_test_steps(self, test_id):
+ test_result = {}
+ total_steps = len(self.jdata[test_id]['test']['execution'].keys())
+ print('------------------------------------------------------------------------')
+ print('Executing test case: ' + self.test_cases_id[test_id])
+ print('------------------------------------------------------------------------')
+ print('You have a total of ' + str(total_steps) + ' test steps to be executed.')
+ print('------------------------------------------------------------------------\n')
+ for step in sorted((self.jdata[test_id]['test']['execution']).keys()):
+ print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
+ print('Expected output: ' + self.jdata[test_id]['test']['execution']['%s' % step]['expected_results'])
+ input('\nPlease press ENTER when you are done to proceed to the next step.\n')
+ while True:
+ done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n')
+ done = done.lower()
+ result_types = {'p':'PASSED',
+ 'f':'FAILED',
+ 'b':'BLOCKED',
+ 's':'SKIPPED'}
+ if done in result_types:
+ for r in result_types:
+ if done == r:
+ res = result_types[r]
+ if res == 'FAILED':
+ log_input = input('\nPlease enter the error and a description for the log (e.g. log:211 Error Bitbake):\n')
+ test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res, 'log': '%s' % log_input}})
+ else:
+ test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res}})
+ break
+ print('Invalid input!')
+ return test_result
+
+ def _create_write_dir(self):
+ basepath = os.environ['BUILDDIR']
+ self.write_dir = basepath + '/tmp/log/manual/'
+
+ def run_test(self, file):
+ self._get_testcases(file)
+ self._create_config()
+ self._create_result_id()
+ self._create_write_dir()
+ test_results = {}
+ print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
+ for i in range(0, len(self.jdata)):
+ test_result = self._execute_test_steps(i)
+ test_results.update(test_result)
+ return self.configuration, self.result_id, self.write_dir, test_results
+
+def manualexecution(args, logger):
+ testrunner = ManualTestRunner()
+ get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
+ resultjsonhelper = OETestResultJSONHelper()
+ resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
+ get_test_results)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('manualexecution', help='helper script to populate results during manual test execution.',
+ description='helper script to populate results during manual test execution. The manual test case JSON files can be found in meta/lib/oeqa/manual/',
+ group='manualexecution')
+ parser_build.set_defaults(func=manualexecution)
+ parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please use "" to quote the file path.')
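+
+# Illustrative usage sketch (assumed invocation via scripts/resulttool; the JSON
+# path below is a hypothetical example):
+#
+#   $ resulttool manualexecution "meta/lib/oeqa/manual/example-module.json"
+#
+# The tool prompts for the extra configuration values, walks each test step
+# interactively (P/F/B/S), and writes the results under $BUILDDIR/tmp/log/manual/.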
diff --git a/poky/scripts/lib/resulttool/merge.py b/poky/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000..3e4b7a3
--- /dev/null
+++ b/poky/scripts/lib/resulttool/merge.py
@@ -0,0 +1,42 @@
+# resulttool - merge multiple testresults.json files into a file or directory
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import resulttool.resultutils as resultutils
+
+def merge(args, logger):
+ if os.path.isdir(args.target_results):
+ results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
+ resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+ resultutils.save_resultsdata(results, args.target_results)
+ else:
+ results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+ if os.path.exists(args.target_results):
+ resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+ resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('merge', help='merge test result files/directories',
+ description='merge the results from multiple files/directories into the target file or directory',
+ group='setup')
+ parser_build.set_defaults(func=merge)
+ parser_build.add_argument('base_results',
+ help='the results file/directory to import')
+ parser_build.add_argument('target_results',
+ help='the target file or directory to merge the base_results with')
+
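+# Illustrative invocations (assumed wiring via scripts/resulttool; paths are
+# hypothetical examples):
+#
+#   $ resulttool merge new-results/ stored-results/      # merge into a results directory
+#   $ resulttool merge testresults.json all-results.json # merge into a single results file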
diff --git a/poky/scripts/lib/resulttool/regression.py b/poky/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000..bdf531d
--- /dev/null
+++ b/poky/scripts/lib/resulttool/regression.py
@@ -0,0 +1,192 @@
+# resulttool - regression analysis
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import resulttool.resultutils as resultutils
+import json
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+def compare_result(logger, base_name, target_name, base_result, target_result):
+ base_result = base_result.get('result')
+ target_result = target_result.get('result')
+ result = {}
+ if base_result and target_result:
+ for k in base_result:
+ base_testcase = base_result[k]
+ base_status = base_testcase.get('status')
+ if base_status:
+ target_testcase = target_result.get(k, {})
+ target_status = target_testcase.get('status')
+ if base_status != target_status:
+ result[k] = {'base': base_status, 'target': target_status}
+ else:
+ logger.error('Failed to retrieve base test case status: %s' % k)
+ if result:
+ resultstring = "Regression: %s\n %s\n" % (base_name, target_name)
+ for k in sorted(result):
+ resultstring += ' %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+ else:
+ resultstring = "Match: %s\n %s" % (base_name, target_name)
+ return result, resultstring
+
+def get_results(logger, source):
+ return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+ base_results = get_results(logger, args.base_result)
+ target_results = get_results(logger, args.target_result)
+
+ regression_common(args, logger, base_results, target_results)
+
+def regression_common(args, logger, base_results, target_results):
+ if args.base_result_id:
+ base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+ if args.target_result_id:
+ target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+ matches = []
+ regressions = []
+ notfound = []
+
+ for a in base_results:
+ if a in target_results:
+ base = list(base_results[a].keys())
+ target = list(target_results[a].keys())
+ # We may have multiple base/targets which are for different configurations. Start by
+ # removing any pairs which match
+ for c in base.copy():
+ for b in target.copy():
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if not res:
+ matches.append(resstr)
+ base.remove(c)
+ target.remove(b)
+ break
+ # Only regressions should remain now; we may not be able to match multiple pairs directly
+ for c in base:
+ for b in target:
+ res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+ if res:
+ regressions.append(resstr)
+ else:
+ notfound.append("%s not found in target" % a)
+ print("\n".join(sorted(matches)))
+ print("\n".join(sorted(regressions)))
+ print("\n".join(sorted(notfound)))
+
+ return 0
+
+def regression_git(args, logger):
+ base_results = {}
+ target_results = {}
+
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(args.repo)
+
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+ if args.branch2:
+ revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+ if not len(revs2):
+ logger.error("No revisions found to compare against")
+ return 1
+ if not len(revs):
+ logger.error("No revision to report on found")
+ return 1
+ else:
+ if len(revs) < 2:
+ logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+ return 1
+
+ # Pick revisions
+ if args.commit:
+ if args.commit_number:
+ logger.warning("Ignoring --commit-number as --commit was specified")
+ index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+ elif args.commit_number:
+ index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+ else:
+ index1 = len(revs) - 1
+
+ if args.branch2:
+ revs2.append(revs[index1])
+ index1 = len(revs2) - 1
+ revs = revs2
+
+ if args.commit2:
+ if args.commit_number2:
+ logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+ index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+ elif args.commit_number2:
+ index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+ else:
+ if index1 > 0:
+ index2 = index1 - 1
+ # Find the closest matching commit number for comparison.
+ # In future we could check that the commit is a common ancestor and
+ # continue back if not, but this is good enough for now.
+ while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+ index2 = index2 - 1
+ else:
+ logger.error("Unable to determine the other commit, use "
+ "--commit2 or --commit-number2 to specify it")
+ return 1
+
+ logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+ base_results = resultutils.git_get_result(repo, revs[index1][2])
+ target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+ regression_common(args, logger, base_results, target_results)
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+
+ parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+ description='regression analysis comparing the base set of results to the target results',
+ group='analysis')
+ parser_build.set_defaults(func=regression)
+ parser_build.add_argument('base_result',
+ help='base result file/directory for the comparison')
+ parser_build.add_argument('target_result',
+ help='target result file/directory to compare with')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID')
+
+ parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
+ description='regression analysis comparing base result set to target '
+ 'result set',
+ group='analysis')
+ parser_build.set_defaults(func=regression_git)
+ parser_build.add_argument('repo',
+ help='the git repository containing the data')
+ parser_build.add_argument('-b', '--base-result-id', default='',
+ help='(optional) filter the base results to this result ID; by default, '
+ 'results are matched by configuration')
+ parser_build.add_argument('-t', '--target-result-id', default='',
+ help='(optional) filter the target results to this result ID; by default, '
+ 'results are matched by configuration')
+
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
+ parser_build.add_argument('--commit', help="Revision to search for")
+ parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+ parser_build.add_argument('--commit2', help="Revision to compare with")
+ parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+
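+# Illustrative invocations (hypothetical paths and revisions):
+#
+#   $ resulttool regression base-results/ target-results/
+#   $ resulttool regression-git path/to/results-repo --branch master --commit <base-sha> --commit2 <target-sha>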
diff --git a/poky/scripts/lib/resulttool/report.py b/poky/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000..9008620
--- /dev/null
+++ b/poky/scripts/lib/resulttool/report.py
@@ -0,0 +1,150 @@
+# test result tool - report text based test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import glob
+import json
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+
+class ResultsTextReport(object):
+ def __init__(self):
+ self.ptests = {}
+ self.result_types = {'passed': ['PASSED', 'passed'],
+ 'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
+ 'skipped': ['SKIPPED', 'skipped']}
+
+
+ def handle_ptest_result(self, k, status, result):
+ if k == 'ptestresult.sections':
+ # Ensure tests without any test results still show up on the report
+ for suite in result['ptestresult.sections']:
+ if suite not in self.ptests:
+ self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ if 'duration' in result['ptestresult.sections'][suite]:
+ self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+ if 'timeout' in result['ptestresult.sections'][suite]:
+ self.ptests[suite]['duration'] += " T"
+ return
+ try:
+ _, suite, test = k.split(".", 2)
+ except ValueError:
+ return
+ # Handle suite names that contain a dot, e.g. 'glib-2.0'
+ if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+ try:
+ _, suite, suite1, test = k.split(".", 3)
+ if suite + "." + suite1 in result['ptestresult.sections']:
+ suite = suite + "." + suite1
+ except ValueError:
+ pass
+ if suite not in self.ptests:
+ self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+ for tk in self.result_types:
+ if status in self.result_types[tk]:
+ self.ptests[suite][tk] += 1
+
+ def get_aggregated_test_result(self, logger, testresult):
+ test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+ result = testresult.get('result', [])
+ for k in result:
+ test_status = result[k].get('status', [])
+ for tk in self.result_types:
+ if test_status in self.result_types[tk]:
+ test_count_report[tk] += 1
+ if test_status in self.result_types['failed']:
+ test_count_report['failed_testcases'].append(k)
+ if k.startswith("ptestresult."):
+ self.handle_ptest_result(k, test_status, result)
+ return test_count_report
+
+ def print_test_report(self, template_file_name, test_count_reports):
+ from jinja2 import Environment, FileSystemLoader
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ file_loader = FileSystemLoader(script_path + '/template')
+ env = Environment(loader=file_loader, trim_blocks=True)
+ template = env.get_template(template_file_name)
+ havefailed = False
+ haveptest = bool(self.ptests)
+ reportvalues = []
+ cols = ['passed', 'failed', 'skipped']
+ maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+ for line in test_count_reports:
+ total_tested = line['passed'] + line['failed'] + line['skipped']
+ vals = {}
+ vals['result_id'] = line['result_id']
+ vals['testseries'] = line['testseries']
+ vals['sort'] = line['testseries'] + "_" + line['result_id']
+ vals['failed_testcases'] = line['failed_testcases']
+ for k in cols:
+ vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+ for k in maxlen:
+ if k in vals and len(vals[k]) > maxlen[k]:
+ maxlen[k] = len(vals[k])
+ reportvalues.append(vals)
+ if line['failed_testcases']:
+ havefailed = True
+ for ptest in self.ptests:
+ if len(ptest) > maxlen['ptest']:
+ maxlen['ptest'] = len(ptest)
+ output = template.render(reportvalues=reportvalues,
+ havefailed=havefailed,
+ haveptest=haveptest,
+ ptests=self.ptests,
+ maxlen=maxlen)
+ print(output)
+
+ def view_test_report(self, logger, source_dir, branch, commit, tag):
+ test_count_reports = []
+ if commit:
+ if tag:
+ logger.warning("Ignoring --tag as --commit was specified")
+ tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+ repo = GitRepo(source_dir)
+ revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+ rev_index = gitarchive.rev_find(revs, 'commit', commit)
+ testresults = resultutils.git_get_result(repo, revs[rev_index][2])
+ elif tag:
+ repo = GitRepo(source_dir)
+ testresults = resultutils.git_get_result(repo, [tag])
+ else:
+ testresults = resultutils.load_resultsdata(source_dir)
+ for testsuite in testresults:
+ for resultid in testresults[testsuite]:
+ result = testresults[testsuite][resultid]
+ test_count_report = self.get_aggregated_test_result(logger, result)
+ test_count_report['testseries'] = result['configuration']['TESTSERIES']
+ test_count_report['result_id'] = resultid
+ test_count_reports.append(test_count_report)
+ self.print_test_report('test_report_full_text.txt', test_count_reports)
+
+def report(args, logger):
+ report = ResultsTextReport()
+ report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('report', help='summarise test results',
+ description='print a text-based summary of the test results',
+ group='analysis')
+ parser_build.set_defaults(func=report)
+ parser_build.add_argument('source_dir',
+ help='source file/directory that contains the test result files to summarise')
+ parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+ parser_build.add_argument('--commit', help="Revision to report")
+ parser_build.add_argument('-t', '--tag', default='',
+ help='treat source_dir as a git repository and report on the results stored under the given tag')
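+
+# Illustrative invocations (paths and tag below are hypothetical examples):
+#
+#   $ resulttool report path/to/results-dir/
+#   $ resulttool report path/to/results-repo --commit <sha>
+#   $ resulttool report path/to/results-repo --tag master/100-gabcdef1/0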
diff --git a/poky/scripts/lib/resulttool/resultutils.py b/poky/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000..153f2b8
--- /dev/null
+++ b/poky/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,131 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import scriptpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+ "oeselftest": [],
+ "runtime": [],
+ "sdk": [],
+ "sdkext": [],
+ "manual": []
+}
+regression_map = {
+ "oeselftest": ['TEST_TYPE', 'MACHINE'],
+ "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+ "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+ "oeselftest": ['TEST_TYPE'],
+ "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+ "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+ "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
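+
+# Each list above names the configuration fields whose values are joined, in order,
+# to form the storage path for a result set; for example, with store_map a "runtime"
+# result is keyed as TEST_TYPE/DISTRO/MACHINE/IMAGE_BASENAME (this mirrors how
+# append_resultsdata() below builds testpath).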
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map):
+ if type(f) is str:
+ with open(f, "r") as filedata:
+ data = json.load(filedata)
+ else:
+ data = f
+ for res in data:
+ if "configuration" not in data[res] or "result" not in data[res]:
+ raise ValueError("Test results data without configuration or result section?")
+ if "TESTSERIES" not in data[res]["configuration"]:
+ data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f))
+ testtype = data[res]["configuration"].get("TEST_TYPE")
+ if testtype not in configmap:
+ raise ValueError("Unknown test type %s" % testtype)
+ configvars = configmap[testtype]
+ testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
+ if testpath not in results:
+ results[testpath] = {}
+ if 'ptestresult.rawlogs' in data[res]['result']:
+ del data[res]['result']['ptestresult.rawlogs']
+ if 'ptestresult.sections' in data[res]['result']:
+ for i in data[res]['result']['ptestresult.sections']:
+ if 'log' in data[res]['result']['ptestresult.sections'][i]:
+ del data[res]['result']['ptestresult.sections'][i]['log']
+ results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map):
+ results = {}
+ if os.path.isfile(source):
+ append_resultsdata(results, source, configmap)
+ return results
+ for root, dirs, files in os.walk(source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ append_resultsdata(results, f, configmap)
+ return results
+
+def filter_resultsdata(results, resultid):
+ newresults = {}
+ for r in results:
+ for i in results[r]:
+ if i == resultid:
+ newresults[r] = {}
+ newresults[r][i] = results[r][i]
+ return newresults
+
+def save_resultsdata(results, destdir, fn="testresults.json"):
+ for res in results:
+ if res:
+ dst = destdir + "/" + res + "/" + fn
+ else:
+ dst = destdir + "/" + fn
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ with open(dst, 'w') as f:
+ f.write(json.dumps(results[res], sort_keys=True, indent=4))
+
+def git_get_result(repo, tags):
+ git_objs = []
+ for tag in tags:
+ files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+ git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+ def parse_json_stream(data):
+ """Parse multiple concatenated JSON objects"""
+ objs = []
+ json_d = ""
+ for line in data.splitlines():
+ if line == '}{':
+ json_d += '}'
+ objs.append(json.loads(json_d))
+ json_d = '{'
+ else:
+ json_d += line
+ objs.append(json.loads(json_d))
+ return objs
+
+ # Optimize by reading all data with one git command
+ results = {}
+ for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+ append_resultsdata(results, obj)
+
+ return results
diff --git a/poky/scripts/lib/resulttool/store.py b/poky/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000..5e33716
--- /dev/null
+++ b/poky/scripts/lib/resulttool/store.py
@@ -0,0 +1,99 @@
+# resulttool - store test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+import tempfile
+import os
+import subprocess
+import json
+import shutil
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
+
+
+def store(args, logger):
+ tempdir = tempfile.mkdtemp(prefix='testresults.')
+ try:
+ results = {}
+ logger.info('Reading files from %s' % args.source)
+ for root, dirs, files in os.walk(args.source):
+ for name in files:
+ f = os.path.join(root, name)
+ if name == "testresults.json":
+ resultutils.append_resultsdata(results, f)
+ elif args.all:
+ dst = f.replace(args.source, tempdir + "/")
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copyfile(f, dst)
+
+ revisions = {}
+
+ if not results and not args.all:
+ if args.allow_empty:
+ logger.info("No results found to store")
+ return 0
+ logger.error("No results found to store")
+ return 1
+
+ # Find the branch/commit/commit_count and ensure they all match
+ for suite in results:
+ for result in results[suite]:
+ config = results[suite][result]['configuration']['LAYERS']['meta']
+ revision = (config['commit'], config['branch'], str(config['commit_count']))
+ if revision not in revisions:
+ revisions[revision] = {}
+ if suite not in revisions[revision]:
+ revisions[revision][suite] = {}
+ revisions[revision][suite][result] = results[suite][result]
+
+ logger.info("Found %d revisions to store" % len(revisions))
+
+ for r in revisions:
+ results = revisions[r]
+ keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
+ subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+ resultutils.save_resultsdata(results, tempdir)
+
+ logger.info('Storing test result into git repository %s' % args.git_dir)
+
+ gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+ "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+ False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+ 'Test run #{tag_number} of {branch}:{commit}', '',
+ [], [], False, keywords, logger)
+
+ finally:
+ subprocess.check_call(["rm", "-rf", tempdir])
+
+ return 0
+
+def register_commands(subparsers):
+ """Register subcommands from this plugin"""
+ parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+ description='takes a results file or directory of results files and stores '
+ 'them into the destination git repository, splitting out the results '
+ 'files as configured',
+ group='setup')
+ parser_build.set_defaults(func=store)
+ parser_build.add_argument('source',
+ help='source file or directory that contains the test result files to be stored')
+ parser_build.add_argument('git_dir',
+ help='the location of the git repository to store the results in')
+ parser_build.add_argument('-a', '--all', action='store_true',
+ help='include all files, not just testresults.json files')
+ parser_build.add_argument('-e', '--allow-empty', action='store_true',
+ help='don\'t error if no results to store are found')
+
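+# Illustrative invocation (hypothetical paths): collect testresults.json files from a
+# build tree and commit them to a results repository, one tag per meta revision found:
+#
+#   $ resulttool store path/to/build/results/ path/to/test-results-repo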
diff --git a/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000..590f35c
--- /dev/null
+++ b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,44 @@
+==============================================================================================================
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% if haveptest %}
+==============================================================================================================
+PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no ptest data
+{% endif %}
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{% if report.failed_testcases %}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
+{% for testcase in report.failed_testcases %}
+ {{ testcase }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}