poky: refresh thud: e4c0a8a7cb..9dfebdaf7a

Update poky to thud HEAD.

Mazliana (2):
      scripts/resulttool: enable manual execution and result creation
      resulttool/manualexecution: To output right test case id

Michael Halstead (1):
      yocto-uninative: Correct sha256sum for aarch64

Richard Purdie (12):
      resulttool: Improvements to allow integration to the autobuilder
      resulttool/resultutils: Avoids tracebacks for missing logs
      resulttool/store: Handle results files for multiple revisions
      resulttool/report: Handle missing metadata sections more cleanly
      resulttool/report: Ensure test suites with no results show up on the report
      resulttool/report: Ensure ptest results are sorted
      resulttool/store: Fix missing variable causing testresult corruption
      oeqa/utils/gitarchive: Handle case where parent is only on origin
      scripts/wic: Be consistent about how we call bitbake
      yocto-uninative: Update to 2.4
      poky.conf: Bump version for 2.6.2 thud release
      build-appliance-image: Update to thud head revision

Yeoh Ee Peng (4):
      resulttool: enable merge, store, report and regression analysis
      resulttool/regression: Ensure regressoin results are sorted
      scripts/resulttool: Enable manual result store and regression
      resulttool/report: Enable roll-up report for a commit

Change-Id: Icf3c93db794539bdd4501d2e7db15c68b6c541ae
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/meta-poky/conf/distro/poky.conf b/poky/meta-poky/conf/distro/poky.conf
index 70f90d3..a370b21 100644
--- a/poky/meta-poky/conf/distro/poky.conf
+++ b/poky/meta-poky/conf/distro/poky.conf
@@ -1,6 +1,6 @@
 DISTRO = "poky"
 DISTRO_NAME = "Poky (Yocto Project Reference Distro)"
-DISTRO_VERSION = "2.6.1"
+DISTRO_VERSION = "2.6.2"
 DISTRO_CODENAME = "thud"
 SDK_VENDOR = "-pokysdk"
 SDK_VERSION := "${@'${DISTRO_VERSION}'.replace('snapshot-${DATE}','snapshot')}"
diff --git a/poky/meta/conf/distro/include/yocto-uninative.inc b/poky/meta/conf/distro/include/yocto-uninative.inc
index c9d502b..59ccd69 100644
--- a/poky/meta/conf/distro/include/yocto-uninative.inc
+++ b/poky/meta/conf/distro/include/yocto-uninative.inc
@@ -6,10 +6,9 @@
 # to the distro running on the build machine.
 #
 
-UNINATIVE_MAXGLIBCVERSION = "2.28"
+UNINATIVE_MAXGLIBCVERSION = "2.29"
 
-UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.3/"
-UNINATIVE_CHECKSUM[aarch64] ?= "b7fbbaad1ec86d76eca84d83098f50525b8a4124cc8685eaed"
-UNINATIVE_CHECKSUM[i686] ?= "44253cddbf629082568cea4fff59419106871a0cf81b4845b5d34e7014887b20"
-UNINATIVE_CHECKSUM[x86_64] ?= "c6954563dad3c95608117c6fc328099036c832bbd924ebf5fdccb622fc0a8684"
-
+UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.4/"
+UNINATIVE_CHECKSUM[aarch64] ?= "af2e2faf6cf00ff45cc1bcd5e3fb00cee7f79b3ec7c3be15917ad4ff8c154cfe"
+UNINATIVE_CHECKSUM[i686] ?= "fafacfc537a6ce2bd122bd16c146881ab5ac69bd575abf6cb68a0dd33fa70ea2"
+UNINATIVE_CHECKSUM[x86_64] ?= "06f91685b782f2ccfedf3070b3ba0fe4a5ba2f0766dad5c9d1642dccf95accd0"
diff --git a/poky/meta/lib/oeqa/files/testresults/testresults.json b/poky/meta/lib/oeqa/files/testresults/testresults.json
new file mode 100644
index 0000000..1a62155
--- /dev/null
+++ b/poky/meta/lib/oeqa/files/testresults/testresults.json
@@ -0,0 +1,40 @@
+{
+    "runtime_core-image-minimal_qemuarm_20181225195701": {
+        "configuration": {
+            "DISTRO": "poky",
+            "HOST_DISTRO": "ubuntu-16.04",
+            "IMAGE_BASENAME": "core-image-minimal",
+            "IMAGE_PKGTYPE": "rpm",
+            "LAYERS": {
+                "meta": {
+                    "branch": "master",
+                    "commit": "801745d918e83f976c706f29669779f5b292ade3",
+                    "commit_count": 52782
+                },
+                "meta-poky": {
+                    "branch": "master",
+                    "commit": "801745d918e83f976c706f29669779f5b292ade3",
+                    "commit_count": 52782
+                },
+                "meta-yocto-bsp": {
+                    "branch": "master",
+                    "commit": "801745d918e83f976c706f29669779f5b292ade3",
+                    "commit_count": 52782
+                }
+            },
+            "MACHINE": "qemuarm",
+            "STARTTIME": "20181225195701",
+            "TEST_TYPE": "runtime"
+        },
+        "result": {
+            "apt.AptRepoTest.test_apt_install_from_repo": {
+                "log": "Test requires apt to be installed",
+                "status": "PASSED"
+            },
+            "buildcpio.BuildCpioTest.test_cpio": {
+                "log": "Test requires autoconf to be installed",
+                "status": "ERROR"
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/poky/meta/lib/oeqa/selftest/cases/resulttooltests.py b/poky/meta/lib/oeqa/selftest/cases/resulttooltests.py
new file mode 100644
index 0000000..0a089c0
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -0,0 +1,94 @@
+import os
+import sys
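+# Make scripts/lib importable so the resulttool modules under test can be loaded
+# from within the selftest tree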
+basepath = os.path.abspath(os.path.dirname(__file__) + '/../../../../../')
+lib_path = basepath + '/scripts/lib'
+sys.path = sys.path + [lib_path]
+from resulttool.report import ResultsTextReport
+from resulttool import regression as regression
+from resulttool import resultutils as resultutils
+from oeqa.selftest.case import OESelftestTestCase
+
+class ResultToolTests(OESelftestTestCase):
+    base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
+                                                            "TESTSERIES": "series1",
+                                                            "IMAGE_BASENAME": "image",
+                                                            "IMAGE_PKGTYPE": "ipk",
+                                                            "DISTRO": "mydistro",
+                                                            "MACHINE": "qemux86"},
+                                          'result': {}},
+                         'base_result2': {'configuration': {"TEST_TYPE": "runtime",
+                                                            "TESTSERIES": "series1",
+                                                            "IMAGE_BASENAME": "image",
+                                                            "IMAGE_PKGTYPE": "ipk",
+                                                            "DISTRO": "mydistro",
+                                                            "MACHINE": "qemux86-64"},
+                                          'result': {}}}
+    target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime",
+                                                                "TESTSERIES": "series1",
+                                                                "IMAGE_BASENAME": "image",
+                                                                "IMAGE_PKGTYPE": "ipk",
+                                                                "DISTRO": "mydistro",
+                                                                "MACHINE": "qemux86"},
+                                          'result': {}},
+                           'target_result2': {'configuration': {"TEST_TYPE": "runtime",
+                                                                "TESTSERIES": "series1",
+                                                                "IMAGE_BASENAME": "image",
+                                                                "IMAGE_PKGTYPE": "ipk",
+                                                                "DISTRO": "mydistro",
+                                                                "MACHINE": "qemux86"},
+                                          'result': {}},
+                           'target_result3': {'configuration': {"TEST_TYPE": "runtime",
+                                                                "TESTSERIES": "series1",
+                                                                "IMAGE_BASENAME": "image",
+                                                                "IMAGE_PKGTYPE": "ipk",
+                                                                "DISTRO": "mydistro",
+                                                                "MACHINE": "qemux86-64"},
+                                          'result': {}}}
+
+    def test_report_can_aggregate_test_result(self):
+        result_data = {'result': {'test1': {'status': 'PASSED'},
+                                  'test2': {'status': 'PASSED'},
+                                  'test3': {'status': 'FAILED'},
+                                  'test4': {'status': 'ERROR'},
+                                  'test5': {'status': 'SKIPPED'}}}
+        report = ResultsTextReport()
+        result_report = report.get_aggregated_test_result(None, result_data)
+        self.assertTrue(result_report['passed'] == 2, msg="Passed count not correct:%s" % result_report['passed'])
+        self.assertTrue(result_report['failed'] == 2, msg="Failed count not correct:%s" % result_report['failed'])
+        self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped'])
+
+    def test_regression_can_get_regression_base_target_pair(self):
+
+        results = {}
+        resultutils.append_resultsdata(results, ResultToolTests.base_results_data)
+        resultutils.append_resultsdata(results, ResultToolTests.target_results_data)
+        self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
+        self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)
+
+    def test_regression_can_get_regression_result(self):
+        base_result_data = {'result': {'test1': {'status': 'PASSED'},
+                                       'test2': {'status': 'PASSED'},
+                                       'test3': {'status': 'FAILED'},
+                                       'test4': {'status': 'ERROR'},
+                                       'test5': {'status': 'SKIPPED'}}}
+        target_result_data = {'result': {'test1': {'status': 'PASSED'},
+                                         'test2': {'status': 'FAILED'},
+                                         'test3': {'status': 'PASSED'},
+                                         'test4': {'status': 'ERROR'},
+                                         'test5': {'status': 'SKIPPED'}}}
+        result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data)
+        self.assertTrue(result['test2']['base'] == 'PASSED',
+                        msg="regression not correct:%s" % result['test2']['base'])
+        self.assertTrue(result['test2']['target'] == 'FAILED',
+                        msg="regression not correct:%s" % result['test2']['target'])
+        self.assertTrue(result['test3']['base'] == 'FAILED',
+                        msg="regression not correct:%s" % result['test3']['base'])
+        self.assertTrue(result['test3']['target'] == 'PASSED',
+                        msg="regression not correct:%s" % result['test3']['target'])
+
+    def test_merge_can_merge_results(self):
+        results = {}
+        resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map)
+        resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map)
+        self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results))
+
diff --git a/poky/meta/lib/oeqa/utils/gitarchive.py b/poky/meta/lib/oeqa/utils/gitarchive.py
index ff614d0..9520b2e 100644
--- a/poky/meta/lib/oeqa/utils/gitarchive.py
+++ b/poky/meta/lib/oeqa/utils/gitarchive.py
@@ -80,6 +80,8 @@
 
         # Create new commit object from the tree
         parent = repo.rev_parse(branch)
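+        # The branch may only exist on origin (no local head yet), so fall back
+        # to the remote-tracking ref before giving up on finding a parent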
+        if not parent:
+            parent = repo.rev_parse("origin/" + branch)
         git_cmd = ['commit-tree', tree, '-m', message]
         if parent:
             git_cmd += ['-p', parent]
@@ -93,8 +95,6 @@
 
         # Update branch head
         git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
-        if parent:
-            git_cmd.append(parent)
         repo.run_cmd(git_cmd)
 
         # Update current HEAD, if we're on branch 'branch'
diff --git a/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb
index c4c369c..5e37a48 100644
--- a/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb
+++ b/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb
@@ -22,7 +22,7 @@
 
 inherit core-image module-base setuptools3
 
-SRCREV ?= "ca417455d79b29cd14cd8d39a9da904bf23fcc48"
+SRCREV ?= "a192fff25b10fc29543edaef8555f3b15ef8234f"
 SRC_URI = "git://git.yoctoproject.org/poky;branch=thud \
            file://Yocto_Build_Appliance.vmx \
            file://Yocto_Build_Appliance.vmxf \
diff --git a/poky/scripts/lib/resulttool/__init__.py b/poky/scripts/lib/resulttool/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/poky/scripts/lib/resulttool/__init__.py
diff --git a/poky/scripts/lib/resulttool/manualexecution.py b/poky/scripts/lib/resulttool/manualexecution.py
new file mode 100755
index 0000000..6487cd9
--- /dev/null
+++ b/poky/scripts/lib/resulttool/manualexecution.py
@@ -0,0 +1,142 @@
+# test case management tool - manual execution from testopia test cases
+#
+# Copyright (c) 2018, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import argparse
+import json
+import os
+import sys
+import datetime
+import re
+from oeqa.core.runner import OETestResultJSONHelper
+
+
+def load_json_file(file):
+    with open(file, "r") as f:
+        return json.load(f)
+
+
+class ManualTestRunner(object):
+    def __init__(self):
+        self.jdata = ''
+        self.test_module = ''
+        self.test_cases_id = ''
+        self.configuration = ''
+        self.starttime = ''
+        self.result_id = ''
+        self.write_dir = ''
+
+    def _get_testcases(self, file):
+        self.jdata = load_json_file(file)
+        self.test_cases_id = []
+        self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
+        for i in self.jdata:
+            self.test_cases_id.append(i['test']['@alias'])
+
+    def _get_input(self, config):
+        while True:
+            output = input('{} = '.format(config))
+            if re.match('^[a-zA-Z0-9_-]+$', output):
+                break
+            print('Only alphanumeric characters, underscores and hyphens are allowed. Please try again.')
+        return output
+
+    def _create_config(self):
+        from oeqa.utils.metadata import get_layers
+        from oeqa.utils.commands import get_bb_var
+        from resulttool.resultutils import store_map
+
+        layers = get_layers(get_bb_var('BBLAYERS'))
+        self.configuration = {}
+        self.configuration['LAYERS'] = layers
+        current_datetime = datetime.datetime.now()
+        self.starttime = current_datetime.strftime('%Y%m%d%H%M%S')
+        self.configuration['STARTTIME'] = self.starttime
+        self.configuration['TEST_TYPE'] = 'manual'
+        self.configuration['TEST_MODULE'] = self.test_module
+
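+        # Prompt for any store_map['manual'] keys (e.g. MACHINE, IMAGE_BASENAME)
+        # not already filled in above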
+        extra_config = set(store_map['manual']) - set(self.configuration)
+        for config in sorted(extra_config):
+            print('---------------------------------------------')
+            print('This is configuration %s. Please provide a configuration value (use "None" if not applicable).'
+                  % config)
+            print('---------------------------------------------')
+            value_conf = self._get_input('Configuration Value')
+            print('---------------------------------------------\n')
+            self.configuration[config] = value_conf
+
+    def _create_result_id(self):
+        self.result_id = 'manual_' + self.test_module + '_' + self.starttime
+
+    def _execute_test_steps(self, test_id):
+        test_result = {}
+        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
+        print('------------------------------------------------------------------------')
+        print('Executing test case: ' + self.test_cases_id[test_id])
+        print('------------------------------------------------------------------------')
+        print('You have a total of ' + str(total_steps) + ' test steps to be executed.')
+        print('------------------------------------------------------------------------\n')
+        for step in sorted((self.jdata[test_id]['test']['execution']).keys()):
+            print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
+            print('Expected output: ' + self.jdata[test_id]['test']['execution']['%s' % step]['expected_results'])
+            done = input('\nPlease press ENTER when you are done to proceed to the next step.\n')
+        while True:
+            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n')
+            done = done.lower()
+            result_types = {'p': 'PASSED',
+                            'f': 'FAILED',
+                            'b': 'BLOCKED',
+                            's': 'SKIPPED'}
+            if done in result_types:
+                res = result_types[done]
+                if res == 'FAILED':
+                    log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
+                    test_result.update({self.test_cases_id[test_id]: {'status': res, 'log': log_input}})
+                else:
+                    test_result.update({self.test_cases_id[test_id]: {'status': res}})
+                break
+            print('Invalid input!')
+        return test_result
+
+    def _create_write_dir(self):
+        basepath = os.environ['BUILDDIR']
+        self.write_dir = basepath + '/tmp/log/manual/'
+
+    def run_test(self, file):
+        self._get_testcases(file)
+        self._create_config()
+        self._create_result_id()
+        self._create_write_dir()
+        test_results = {}
+        print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
+        for i in range(0, len(self.jdata)):
+            test_result = self._execute_test_steps(i)
+            test_results.update(test_result)
+        return self.configuration, self.result_id, self.write_dir, test_results
+
+def manualexecution(args, logger):
+    testrunner = ManualTestRunner()
+    get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
+    resultjsonhelper = OETestResultJSONHelper()
+    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
+                                          get_test_results)
+    return 0
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('manualexecution', help='helper script for entering results during manual test execution.',
+                                         description='helper script for entering results during manual test execution. Manual test case JSON files can be found in meta/lib/oeqa/manual/',
+                                         group='manualexecution')
+    parser_build.set_defaults(func=manualexecution)
+    parser_build.add_argument('file', help='specify the path to the manual test case JSON file. Note: please wrap the file path in double quotes.')
diff --git a/poky/scripts/lib/resulttool/merge.py b/poky/scripts/lib/resulttool/merge.py
new file mode 100644
index 0000000..3e4b7a3
--- /dev/null
+++ b/poky/scripts/lib/resulttool/merge.py
@@ -0,0 +1,42 @@
+# resulttool - merge multiple testresults.json files into a file or directory
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import resulttool.resultutils as resultutils
+
+def merge(args, logger):
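+    # If the target is a directory, results are merged in place keeping the
+    # store_map layout; otherwise everything is flattened into the single
+    # target results file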
+    if os.path.isdir(args.target_results):
+        results = resultutils.load_resultsdata(args.target_results, configmap=resultutils.store_map)
+        resultutils.append_resultsdata(results, args.base_results, configmap=resultutils.store_map)
+        resultutils.save_resultsdata(results, args.target_results)
+    else:
+        results = resultutils.load_resultsdata(args.base_results, configmap=resultutils.flatten_map)
+        if os.path.exists(args.target_results):
+            resultutils.append_resultsdata(results, args.target_results, configmap=resultutils.flatten_map)
+        resultutils.save_resultsdata(results, os.path.dirname(args.target_results), fn=os.path.basename(args.target_results))
+
+    return 0
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('merge', help='merge test result files/directories',
+                                         description='merge the results from multiple files/directories into the target file or directory',
+                                         group='setup')
+    parser_build.set_defaults(func=merge)
+    parser_build.add_argument('base_results',
+                              help='the results file/directory to import')
+    parser_build.add_argument('target_results',
+                              help='the target file or directory to merge the base_results with')
+
diff --git a/poky/scripts/lib/resulttool/regression.py b/poky/scripts/lib/resulttool/regression.py
new file mode 100644
index 0000000..bdf531d
--- /dev/null
+++ b/poky/scripts/lib/resulttool/regression.py
@@ -0,0 +1,192 @@
+# resulttool - regression analysis
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import resulttool.resultutils as resultutils
+import json
+
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+def compare_result(logger, base_name, target_name, base_result, target_result):
+    base_result = base_result.get('result')
+    target_result = target_result.get('result')
+    result = {}
+    if base_result and target_result:
+        for k in base_result:
+            base_testcase = base_result[k]
+            base_status = base_testcase.get('status')
+            if base_status:
+                target_testcase = target_result.get(k, {})
+                target_status = target_testcase.get('status')
+                if base_status != target_status:
+                    result[k] = {'base': base_status, 'target': target_status}
+            else:
+                logger.error('Failed to retrieve base test case status: %s' % k)
+    if result:
+        resultstring = "Regression: %s\n            %s\n" % (base_name, target_name)
+        for k in sorted(result):
+            resultstring += '    %s: %s -> %s\n' % (k, result[k]['base'], result[k]['target'])
+    else:
+        resultstring = "Match: %s\n       %s" % (base_name, target_name)
+    return result, resultstring
+
+def get_results(logger, source):
+    return resultutils.load_resultsdata(source, configmap=resultutils.regression_map)
+
+def regression(args, logger):
+    base_results = get_results(logger, args.base_result)
+    target_results = get_results(logger, args.target_result)
+
+    regression_common(args, logger, base_results, target_results)
+
+def regression_common(args, logger, base_results, target_results):
+    if args.base_result_id:
+        base_results = resultutils.filter_resultsdata(base_results, args.base_result_id)
+    if args.target_result_id:
+        target_results = resultutils.filter_resultsdata(target_results, args.target_result_id)
+
+    matches = []
+    regressions = []
+    notfound = []
+
+    for a in base_results:
+        if a in target_results:
+            base = list(base_results[a].keys())
+            target = list(target_results[a].keys())
+            # We may have multiple base/targets which are for different configurations. Start by 
+            # removing any pairs which match
+            for c in base.copy():
+                for b in target.copy():
+                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+                    if not res:
+                        matches.append(resstr)
+                        base.remove(c)
+                        target.remove(b)
+                        break
+            # Should only now see regressions, we may not be able to match multiple pairs directly
+            for c in base:
+                for b in target:
+                    res, resstr = compare_result(logger, c, b, base_results[a][c], target_results[a][b])
+                    if res:
+                        regressions.append(resstr)
+        else:
+            notfound.append("%s not found in target" % a)
+    print("\n".join(sorted(matches)))
+    print("\n".join(sorted(regressions)))
+    print("\n".join(sorted(notfound)))
+
+    return 0
+
+def regression_git(args, logger):
+    base_results = {}
+    target_results = {}
+
+    tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+    repo = GitRepo(args.repo)
+
+    revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch)
+
+    if args.branch2:
+        revs2 = gitarchive.get_test_revs(logger, repo, tag_name, branch=args.branch2)
+        if not len(revs2):
+            logger.error("No revisions found to compare against")
+            return 1
+        if not len(revs):
+            logger.error("No revision to report on found")
+            return 1
+    else:
+        if len(revs) < 2:
+            logger.error("Only %d tester revisions found, unable to generate report" % len(revs))
+            return 1
+
+    # Pick revisions
+    if args.commit:
+        if args.commit_number:
+            logger.warning("Ignoring --commit-number as --commit was specified")
+        index1 = gitarchive.rev_find(revs, 'commit', args.commit)
+    elif args.commit_number:
+        index1 = gitarchive.rev_find(revs, 'commit_number', args.commit_number)
+    else:
+        index1 = len(revs) - 1
+
+    if args.branch2:
+        revs2.append(revs[index1])
+        index1 = len(revs2) - 1
+        revs = revs2
+
+    if args.commit2:
+        if args.commit_number2:
+            logger.warning("Ignoring --commit-number2 as --commit2 was specified")
+        index2 = gitarchive.rev_find(revs, 'commit', args.commit2)
+    elif args.commit_number2:
+        index2 = gitarchive.rev_find(revs, 'commit_number', args.commit_number2)
+    else:
+        if index1 > 0:
+            index2 = index1 - 1
+            # Find the closest matching commit number for comparison
+            # In future we could check the commit is a common ancestor and
+            # continue back if not, but this is good enough for now
+            while index2 > 0 and revs[index2].commit_number > revs[index1].commit_number:
+                index2 = index2 - 1
+        else:
+            logger.error("Unable to determine the other commit, use "
+                      "--commit2 or --commit-number2 to specify it")
+            return 1
+
+    logger.info("Comparing:\n%s\nto\n%s\n" % (revs[index1], revs[index2]))
+
+    base_results = resultutils.git_get_result(repo, revs[index1][2])
+    target_results = resultutils.git_get_result(repo, revs[index2][2])
+
+    regression_common(args, logger, base_results, target_results)
+
+    return 0
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+
+    parser_build = subparsers.add_parser('regression', help='regression file/directory analysis',
+                                         description='regression analysis comparing the base set of results to the target results',
+                                         group='analysis')
+    parser_build.set_defaults(func=regression)
+    parser_build.add_argument('base_result',
+                              help='base result file/directory for the comparison')
+    parser_build.add_argument('target_result',
+                              help='target result file/directory to compare with')
+    parser_build.add_argument('-b', '--base-result-id', default='',
+                              help='(optional) filter the base results to this result ID')
+    parser_build.add_argument('-t', '--target-result-id', default='',
+                              help='(optional) filter the target results to this result ID')
+
+    parser_build = subparsers.add_parser('regression-git', help='regression git analysis',
+                                         description='regression analysis comparing base result set to target '
+                                                     'result set',
+                                         group='analysis')
+    parser_build.set_defaults(func=regression_git)
+    parser_build.add_argument('repo',
+                              help='the git repository containing the data')
+    parser_build.add_argument('-b', '--base-result-id', default='',
+                              help='(optional) filter the base results to this result ID')
+    parser_build.add_argument('-t', '--target-result-id', default='',
+                              help='(optional) filter the target results to this result ID')
+
+    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    parser_build.add_argument('--branch2', help="Branch to find comparison revisions in")
+    parser_build.add_argument('--commit', help="Revision to search for")
+    parser_build.add_argument('--commit-number', help="Revision number to search for, redundant if --commit is specified")
+    parser_build.add_argument('--commit2', help="Revision to compare with")
+    parser_build.add_argument('--commit-number2', help="Revision number to compare with, redundant if --commit2 is specified")
+
diff --git a/poky/scripts/lib/resulttool/report.py b/poky/scripts/lib/resulttool/report.py
new file mode 100644
index 0000000..9008620
--- /dev/null
+++ b/poky/scripts/lib/resulttool/report.py
@@ -0,0 +1,150 @@
+# test result tool - report text based test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import os
+import glob
+import json
+import resulttool.resultutils as resultutils
+from oeqa.utils.git import GitRepo
+import oeqa.utils.gitarchive as gitarchive
+
+
+class ResultsTextReport(object):
+    def __init__(self):
+        self.ptests = {}
+        self.result_types = {'passed': ['PASSED', 'passed'],
+                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
+                             'skipped': ['SKIPPED', 'skipped']}
+
+
+    def handle_ptest_result(self, k, status, result):
+        if k == 'ptestresult.sections':
+            # Ensure tests without any test results still show up on the report
+            for suite in result['ptestresult.sections']:
+                if suite not in self.ptests:
+                    self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+                if 'duration' in result['ptestresult.sections'][suite]:
+                    self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+                if 'timeout' in result['ptestresult.sections'][suite]:
+                    self.ptests[suite]['duration'] += " T"
+            return
+        try:
+            _, suite, test = k.split(".", 2)
+        except ValueError:
+            return
+        # Handle 'glib-2.0'
+        if 'ptestresult.sections' in result and suite not in result['ptestresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ptestresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ptests:
+            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ptests[suite][tk] += 1
+
+    def get_aggregated_test_result(self, logger, testresult):
+        test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
+        result = testresult.get('result', [])
+        for k in result:
+            test_status = result[k].get('status', [])
+            for tk in self.result_types:
+                if test_status in self.result_types[tk]:
+                    test_count_report[tk] += 1
+            if test_status in self.result_types['failed']:
+                test_count_report['failed_testcases'].append(k)
+            if k.startswith("ptestresult."):
+                self.handle_ptest_result(k, test_status, result)
+        return test_count_report
+
+    def print_test_report(self, template_file_name, test_count_reports):
+        from jinja2 import Environment, FileSystemLoader
+        script_path = os.path.dirname(os.path.realpath(__file__))
+        file_loader = FileSystemLoader(script_path + '/template')
+        env = Environment(loader=file_loader, trim_blocks=True)
+        template = env.get_template(template_file_name)
+        havefailed = False
+        haveptest = bool(self.ptests)
+        reportvalues = []
+        cols = ['passed', 'failed', 'skipped']
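+        # Track the widest entry in each column so the Jinja template can pad
+        # the fields into aligned columns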
+        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
+        for line in test_count_reports:
+            total_tested = line['passed'] + line['failed'] + line['skipped']
+            vals = {}
+            vals['result_id'] = line['result_id']
+            vals['testseries'] = line['testseries']
+            vals['sort'] = line['testseries'] + "_" + line['result_id']
+            vals['failed_testcases'] = line['failed_testcases']
+            for k in cols:
+                vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
+            for k in maxlen:
+                if k in vals and len(vals[k]) > maxlen[k]:
+                    maxlen[k] = len(vals[k])
+            reportvalues.append(vals)
+            if line['failed_testcases']:
+                havefailed = True
+        for ptest in self.ptests:
+            if len(ptest) > maxlen['ptest']:
+                maxlen['ptest'] = len(ptest)
+        output = template.render(reportvalues=reportvalues,
+                                 havefailed=havefailed,
+                                 haveptest=haveptest,
+                                 ptests=self.ptests,
+                                 maxlen=maxlen)
+        print(output)
+
+    def view_test_report(self, logger, source_dir, branch, commit, tag):
+        test_count_reports = []
+        if commit:
+            if tag:
+                logger.warning("Ignoring --tag as --commit was specified")
+            tag_name = "{branch}/{commit_number}-g{commit}/{tag_number}"
+            repo = GitRepo(source_dir)
+            revs = gitarchive.get_test_revs(logger, repo, tag_name, branch=branch)
+            rev_index = gitarchive.rev_find(revs, 'commit', commit)
+            testresults = resultutils.git_get_result(repo, revs[rev_index][2])
+        elif tag:
+            repo = GitRepo(source_dir)
+            testresults = resultutils.git_get_result(repo, [tag])
+        else:
+            testresults = resultutils.load_resultsdata(source_dir)
+        for testsuite in testresults:
+            for resultid in testresults[testsuite]:
+                result = testresults[testsuite][resultid]
+                test_count_report = self.get_aggregated_test_result(logger, result)
+                test_count_report['testseries'] = result['configuration']['TESTSERIES']
+                test_count_report['result_id'] = resultid
+                test_count_reports.append(test_count_report)
+        self.print_test_report('test_report_full_text.txt', test_count_reports)
+
+def report(args, logger):
+    report = ResultsTextReport()
+    report.view_test_report(logger, args.source_dir, args.branch, args.commit, args.tag)
+    return 0
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('report', help='summarise test results',
+                                         description='print a text-based summary of the test results',
+                                         group='analysis')
+    parser_build.set_defaults(func=report)
+    parser_build.add_argument('source_dir',
+                              help='source file/directory that contain the test result files to summarise')
+    parser_build.add_argument('--branch', '-B', default='master', help="Branch to find commit in")
+    parser_build.add_argument('--commit', help="Revision to report")
+    parser_build.add_argument('-t', '--tag', default='',
+                              help='source_dir is a git repository, report on the tag specified from that repository')
diff --git a/poky/scripts/lib/resulttool/resultutils.py b/poky/scripts/lib/resulttool/resultutils.py
new file mode 100644
index 0000000..153f2b8
--- /dev/null
+++ b/poky/scripts/lib/resulttool/resultutils.py
@@ -0,0 +1,131 @@
+# resulttool - common library/utility functions
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import os
+import json
+import scriptpath
+scriptpath.add_oe_lib_path()
+
+flatten_map = {
+    "oeselftest": [],
+    "runtime": [],
+    "sdk": [],
+    "sdkext": [],
+    "manual": []
+}
+regression_map = {
+    "oeselftest": ['TEST_TYPE', 'MACHINE'],
+    "runtime": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'IMAGE_PKGTYPE', 'DISTRO'],
+    "sdk": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+    "sdkext": ['TESTSERIES', 'TEST_TYPE', 'IMAGE_BASENAME', 'MACHINE', 'SDKMACHINE'],
+    "manual": ['TEST_TYPE', 'TEST_MODULE', 'IMAGE_BASENAME', 'MACHINE']
+}
+store_map = {
+    "oeselftest": ['TEST_TYPE'],
+    "runtime": ['TEST_TYPE', 'DISTRO', 'MACHINE', 'IMAGE_BASENAME'],
+    "sdk": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+    "sdkext": ['TEST_TYPE', 'MACHINE', 'SDKMACHINE', 'IMAGE_BASENAME'],
+    "manual": ['TEST_TYPE', 'TEST_MODULE', 'MACHINE', 'IMAGE_BASENAME']
+}
+
+#
+# Load the json file and append the results data into the provided results dict
+#
+def append_resultsdata(results, f, configmap=store_map):
+    if type(f) is str:
+        with open(f, "r") as filedata:
+            data = json.load(filedata)
+    else:
+        data = f
+    for res in data:
+        if "configuration" not in data[res] or "result" not in data[res]:
+            raise ValueError("Test results data without configuration or result section?")
+        if "TESTSERIES" not in data[res]["configuration"]:
+            data[res]["configuration"]["TESTSERIES"] = os.path.basename(os.path.dirname(f))
+        testtype = data[res]["configuration"].get("TEST_TYPE")
+        if testtype not in configmap:
+            raise ValueError("Unknown test type %s" % testtype)
+        configvars = configmap[testtype]
+        testpath = "/".join(data[res]["configuration"].get(i) for i in configvars)
+        if testpath not in results:
+            results[testpath] = {}
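+        # Drop the raw ptest logs and per-section logs; they bloat the stored
+        # results and aren't needed for reporting or regression analysis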
+        if 'ptestresult.rawlogs' in data[res]['result']:
+            del data[res]['result']['ptestresult.rawlogs']
+        if 'ptestresult.sections' in data[res]['result']:
+            for i in data[res]['result']['ptestresult.sections']:
+                if 'log' in data[res]['result']['ptestresult.sections'][i]:
+                    del data[res]['result']['ptestresult.sections'][i]['log']
+        results[testpath][res] = data[res]
+
+#
+# Walk a directory and find/load results data
+# or load directly from a file
+#
+def load_resultsdata(source, configmap=store_map):
+    results = {}
+    if os.path.isfile(source):
+        append_resultsdata(results, source, configmap)
+        return results
+    for root, dirs, files in os.walk(source):
+        for name in files:
+            f = os.path.join(root, name)
+            if name == "testresults.json":
+                append_resultsdata(results, f, configmap)
+    return results
+
+def filter_resultsdata(results, resultid):
+    newresults = {}
+    for r in results:
+        for i in results[r]:
+            if i == resultid:
+                newresults[r] = {}
+                newresults[r][i] = results[r][i]
+    return newresults
+
+def save_resultsdata(results, destdir, fn="testresults.json"):
+    for res in results:
+        if res:
+            dst = destdir + "/" + res + "/" + fn
+        else:
+            dst = destdir + "/" + fn
+        os.makedirs(os.path.dirname(dst), exist_ok=True)
+        with open(dst, 'w') as f:
+            f.write(json.dumps(results[res], sort_keys=True, indent=4))
+
+def git_get_result(repo, tags):
+    git_objs = []
+    for tag in tags:
+        files = repo.run_cmd(['ls-tree', "--name-only", "-r", tag]).splitlines()
+        git_objs.extend([tag + ':' + f for f in files if f.endswith("testresults.json")])
+
+    def parse_json_stream(data):
+        """Parse multiple concatenated JSON objects"""
+        objs = []
+        json_d = ""
+        for line in data.splitlines():
+            if line == '}{':
+                json_d += '}'
+                objs.append(json.loads(json_d))
+                json_d = '{'
+            else:
+                json_d += line
+        objs.append(json.loads(json_d))
+        return objs
+
+    # Optimize by reading all data with one git command
+    results = {}
+    for obj in parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--'])):
+        append_resultsdata(results, obj)
+
+    return results
diff --git a/poky/scripts/lib/resulttool/store.py b/poky/scripts/lib/resulttool/store.py
new file mode 100644
index 0000000..5e33716
--- /dev/null
+++ b/poky/scripts/lib/resulttool/store.py
@@ -0,0 +1,99 @@
+# resulttool - store test results
+#
+# Copyright (c) 2019, Intel Corporation.
+# Copyright (c) 2019, Linux Foundation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import tempfile
+import os
+import subprocess
+import json
+import shutil
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+import resulttool.resultutils as resultutils
+import oeqa.utils.gitarchive as gitarchive
+
+
+def store(args, logger):
+    tempdir = tempfile.mkdtemp(prefix='testresults.')
+    try:
+        results = {}
+        logger.info('Reading files from %s' % args.source)
+        for root, dirs, files in os.walk(args.source):
+            for name in files:
+                f = os.path.join(root, name)
+                if name == "testresults.json":
+                    resultutils.append_resultsdata(results, f)
+                elif args.all:
+                    dst = f.replace(args.source, tempdir + "/")
+                    os.makedirs(os.path.dirname(dst), exist_ok=True)
+                    shutil.copyfile(f, dst)
+
+        revisions = {}
+
+        if not results and not args.all:
+            if args.allow_empty:
+                logger.info("No results found to store")
+                return 0
+            logger.error("No results found to store")
+            return 1
+
+        # Find the branch/commit/commit_count and ensure they all match
+        for suite in results:
+            for result in results[suite]:
+                config = results[suite][result]['configuration']['LAYERS']['meta']
+                revision = (config['commit'], config['branch'], str(config['commit_count']))
+                if revision not in revisions:
+                    revisions[revision] = {}
+                if suite not in revisions[revision]:
+                    revisions[revision][suite] = {}
+                revisions[revision][suite][result] = results[suite][result]
+
+        logger.info("Found %d revisions to store" % len(revisions))
+
+        for r in revisions:
+            results = revisions[r]
+            keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
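+            # Clear out the previous revision's files before writing this
+            # revision's results into the tempdir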
+            subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
+            resultutils.save_resultsdata(results, tempdir)
+
+            logger.info('Storing test result into git repository %s' % args.git_dir)
+
+            gitarchive.gitarchive(tempdir, args.git_dir, False, False,
+                                  "Results of {branch}:{commit}", "branch: {branch}\ncommit: {commit}", "{branch}",
+                                  False, "{branch}/{commit_count}-g{commit}/{tag_number}",
+                                  'Test run #{tag_number} of {branch}:{commit}', '',
+                                  [], [], False, keywords, logger)
+
+    finally:
+        subprocess.check_call(["rm", "-rf",  tempdir])
+
+    return 0
+
+def register_commands(subparsers):
+    """Register subcommands from this plugin"""
+    parser_build = subparsers.add_parser('store', help='store test results into a git repository',
+                                         description='takes a results file or directory of results files and stores '
+                                                     'them into the destination git repository, splitting out the results '
+                                                     'files as configured',
+                                         group='setup')
+    parser_build.set_defaults(func=store)
+    parser_build.add_argument('source',
+                              help='source file or directory that contain the test result files to be stored')
+    parser_build.add_argument('git_dir',
+                              help='the location of the git repository to store the results in')
+    parser_build.add_argument('-a', '--all', action='store_true',
+                              help='include all files, not just testresults.json files')
+    parser_build.add_argument('-e', '--allow-empty', action='store_true',
+                              help='don\'t error if no results to store are found')
+
diff --git a/poky/scripts/lib/resulttool/template/test_report_full_text.txt b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
new file mode 100644
index 0000000..590f35c
--- /dev/null
+++ b/poky/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -0,0 +1,44 @@
+==============================================================================================================
+Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Test Series'.ljust(maxlen['testseries']) }} | {{ 'ID'.ljust(maxlen['result_id']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{{ report.testseries.ljust(maxlen['testseries']) }} | {{ report.result_id.ljust(maxlen['result_id']) }} | {{ (report.passed|string).ljust(maxlen['passed']) }} | {{ (report.failed|string).ljust(maxlen['failed']) }} | {{ (report.skipped|string).ljust(maxlen['skipped']) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% if haveptest %}
+==============================================================================================================
+PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests |sort %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no ptest data
+{% endif %}
+
+==============================================================================================================
+Failed test cases (sorted by testseries, ID)
+==============================================================================================================
+{% if havefailed %}
+--------------------------------------------------------------------------------------------------------------
+{% for report in reportvalues |sort(attribute='sort') %}
+{% if report.failed_testcases %}
+testseries | result_id : {{ report.testseries }} | {{ report.result_id }}
+{% for testcase in report.failed_testcases %}
+    {{ testcase }}
+{% endfor %}
+{% endif %}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+{% else %}
+There were no test failures
+{% endif %}
diff --git a/poky/scripts/resulttool b/poky/scripts/resulttool
new file mode 100755
index 0000000..5a89e1c
--- /dev/null
+++ b/poky/scripts/resulttool
@@ -0,0 +1,93 @@
+#!/usr/bin/env python3
+#
+# test results tool - tool for manipulating OEQA test result json files
+# (merge results, summarise results, regression analysis, generate manual test results file)
+#
+# To see the help information:
+#     $ resulttool
+#
+# To store test results from oeqa automated tests, run:
+#     $ resulttool store <source_dir> <git_dir>
+#
+# To merge test results, run:
+#     $ resulttool merge <base_result_file> <target_result_file>
+#
+# To generate a test report, run:
+#     $ resulttool report <source_dir>
+#
+# To perform regression analysis, run:
+#     $ resulttool regression <base_result_file> <target_result_file>
+#
+# To execute manual test cases, run:
+#     $ resulttool manualexecution <manualjsonfile>
+#
+# By default, manualexecution stores testresults.json in <build>/tmp/log/manual/
+#
+# Copyright (c) 2019, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+
+import os
+import sys
+import argparse
+import logging
+script_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = script_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+import resulttool.merge
+import resulttool.store
+import resulttool.regression
+import resulttool.report
+import resulttool.manualexecution
+logger = scriptutils.logger_create('resulttool')
+
+def _validate_user_input_arguments(args):
+    if hasattr(args, "source_dir"):
+        if not os.path.isdir(args.source_dir):
+            logger.error('source_dir argument needs to be a directory: %s' % args.source_dir)
+            return False
+    return True
+
+def main():
+    parser = argparse_oe.ArgumentParser(description="OEQA test result manipulation tool.",
+                                        epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+    parser.add_argument('-d', '--debug', help='enable debug output', action='store_true')
+    parser.add_argument('-q', '--quiet', help='print only errors', action='store_true')
+    subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+    subparsers.required = True
+    subparsers.add_subparser_group('manualexecution', 'manual testcases', 300)
+    resulttool.manualexecution.register_commands(subparsers)
+    subparsers.add_subparser_group('setup', 'setup', 200)
+    resulttool.merge.register_commands(subparsers)
+    resulttool.store.register_commands(subparsers)
+    subparsers.add_subparser_group('analysis', 'analysis', 100)
+    resulttool.regression.register_commands(subparsers)
+    resulttool.report.register_commands(subparsers)
+
+    args = parser.parse_args()
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif args.quiet:
+        logger.setLevel(logging.ERROR)
+
+    if not _validate_user_input_arguments(args):
+        return -1
+
+    try:
+        ret = args.func(args, logger)
+    except argparse_oe.ArgumentUsageError as ae:
+        parser.error_subcommand(ae.message, ae.subcommand)
+    return ret
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/poky/scripts/wic b/poky/scripts/wic
index 37dfe2d..b4b7212 100755
--- a/poky/scripts/wic
+++ b/poky/scripts/wic
@@ -35,6 +35,7 @@
 import sys
 import argparse
 import logging
+import subprocess
 
 from collections import namedtuple
 from distutils import spawn
@@ -63,10 +64,7 @@
 bitbake_exe = spawn.find_executable('bitbake')
 if bitbake_exe:
     bitbake_path = scriptpath.add_bitbake_lib_path()
-    from bb import cookerdata
-    from bb.main import bitbake_main, BitBakeConfigParameters
-else:
-    bitbake_main = None
+    import bb
 
 from wic import WicError
 from wic.misc import get_bitbake_var, BB_VARS
@@ -124,7 +122,7 @@
     Command-line handling for image creation.  The real work is done
     by image.engine.wic_create()
     """
-    if options.build_rootfs and not bitbake_main:
+    if options.build_rootfs and not bitbake_exe:
         raise WicError("Can't build rootfs as bitbake is not in the $PATH")
 
     if not options.image_name:
@@ -160,9 +158,7 @@
                 argv.append("--debug")
 
             logger.info("Building rootfs...\n")
-            if bitbake_main(BitBakeConfigParameters(argv),
-                            cookerdata.CookerConfiguration()):
-                raise WicError("bitbake exited with error")
+            subprocess.check_call(argv)
 
         rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", options.image_name)
         kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", options.image_name)
@@ -179,9 +175,7 @@
 
     if not options.vars_dir and (not native_sysroot or not os.path.isdir(native_sysroot)):
         logger.info("Building wic-tools...\n")
-        if bitbake_main(BitBakeConfigParameters("bitbake wic-tools".split()),
-                        cookerdata.CookerConfiguration()):
-            raise WicError("bitbake wic-tools failed")
+        subprocess.check_call(["bitbake", "wic-tools"])
         native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
 
     if not native_sysroot: