poky: refresh thud: e4c0a8a7cb..9dfebdaf7a

Update poky to thud HEAD.

Mazliana (2):
      scripts/resulttool: enable manual execution and result creation
      resulttool/manualexecution: To output right test case id

Michael Halstead (1):
      yocto-uninative: Correct sha256sum for aarch64

Richard Purdie (12):
      resulttool: Improvements to allow integration to the autobuilder
      resulttool/resultutils: Avoids tracebacks for missing logs
      resulttool/store: Handle results files for multiple revisions
      resulttool/report: Handle missing metadata sections more cleanly
      resulttool/report: Ensure test suites with no results show up on the report
      resulttool/report: Ensure ptest results are sorted
      resulttool/store: Fix missing variable causing testresult corruption
      oeqa/utils/gitarchive: Handle case where parent is only on origin
      scripts/wic: Be consistent about how we call bitbake
      yocto-uninative: Update to 2.4
      poky.conf: Bump version for 2.6.2 thud release
      build-appliance-image: Update to thud head revision

Yeoh Ee Peng (4):
      resulttool: enable merge, store, report and regression analysis
      resulttool/regression: Ensure regression results are sorted
      scripts/resulttool: Enable manual result store and regression
      resulttool/report: Enable roll-up report for a commit

Change-Id: Icf3c93db794539bdd4501d2e7db15c68b6c541ae
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/meta/lib/oeqa/selftest/cases/resulttooltests.py b/poky/meta/lib/oeqa/selftest/cases/resulttooltests.py
new file mode 100644
index 0000000..0a089c0
--- /dev/null
+++ b/poky/meta/lib/oeqa/selftest/cases/resulttooltests.py
@@ -0,0 +1,94 @@
+import os
+import sys
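+# resulttool modules live under scripts/lib rather than meta/lib, so add that
+# directory to sys.path before importing them.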
+basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..', '..'))
+lib_path = os.path.join(basepath, 'scripts', 'lib')
+sys.path.append(lib_path)
+from resulttool.report import ResultsTextReport
+from resulttool import regression
+from resulttool import resultutils
+from oeqa.selftest.case import OESelftestTestCase
+
+class ResultToolTests(OESelftestTestCase):
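+    # Sample data in the testresults.json layout consumed by resulttool: each
+    # entry maps a result id to its 'configuration' and 'result' sections.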
+    base_results_data = {'base_result1': {'configuration': {"TEST_TYPE": "runtime",
+                                                            "TESTSERIES": "series1",
+                                                            "IMAGE_BASENAME": "image",
+                                                            "IMAGE_PKGTYPE": "ipk",
+                                                            "DISTRO": "mydistro",
+                                                            "MACHINE": "qemux86"},
+                                          'result': {}},
+                         'base_result2': {'configuration': {"TEST_TYPE": "runtime",
+                                                            "TESTSERIES": "series1",
+                                                            "IMAGE_BASENAME": "image",
+                                                            "IMAGE_PKGTYPE": "ipk",
+                                                            "DISTRO": "mydistro",
+                                                            "MACHINE": "qemux86-64"},
+                                          'result': {}}}
+    target_results_data = {'target_result1': {'configuration': {"TEST_TYPE": "runtime",
+                                                                "TESTSERIES": "series1",
+                                                                "IMAGE_BASENAME": "image",
+                                                                "IMAGE_PKGTYPE": "ipk",
+                                                                "DISTRO": "mydistro",
+                                                                "MACHINE": "qemux86"},
+                                              'result': {}},
+                           'target_result2': {'configuration': {"TEST_TYPE": "runtime",
+                                                                "TESTSERIES": "series1",
+                                                                "IMAGE_BASENAME": "image",
+                                                                "IMAGE_PKGTYPE": "ipk",
+                                                                "DISTRO": "mydistro",
+                                                                "MACHINE": "qemux86"},
+                                              'result': {}},
+                           'target_result3': {'configuration': {"TEST_TYPE": "runtime",
+                                                                "TESTSERIES": "series1",
+                                                                "IMAGE_BASENAME": "image",
+                                                                "IMAGE_PKGTYPE": "ipk",
+                                                                "DISTRO": "mydistro",
+                                                                "MACHINE": "qemux86-64"},
+                                              'result': {}}}
+
+    def test_report_can_aggregate_test_result(self):
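+        # Five results in: two PASSED, one FAILED, one ERROR (counted together
+        # with FAILED) and one SKIPPED.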
+        result_data = {'result': {'test1': {'status': 'PASSED'},
+                                  'test2': {'status': 'PASSED'},
+                                  'test3': {'status': 'FAILED'},
+                                  'test4': {'status': 'ERROR'},
+                                  'test5': {'status': 'SKIPPED'}}}
+        report = ResultsTextReport()
+        result_report = report.get_aggregated_test_result(None, result_data)
+        self.assertTrue(result_report['passed'] == 2, msg="Passed count not correct:%s" % result_report['passed'])
+        self.assertTrue(result_report['failed'] == 2, msg="Failed count not correct:%s" % result_report['failed'])
+        self.assertTrue(result_report['skipped'] == 1, msg="Skipped count not correct:%s" % result_report['skipped'])
+
+    def test_regression_can_get_regression_base_target_pair(self):
+
+        results = {}
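+        # With the default configmap, append_resultsdata() is expected to group
+        # entries under "TEST_TYPE/DISTRO/MACHINE/IMAGE_BASENAME" keys, pairing
+        # base and target results for the same configuration.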
+        resultutils.append_resultsdata(results, ResultToolTests.base_results_data)
+        resultutils.append_resultsdata(results, ResultToolTests.target_results_data)
+        self.assertTrue('target_result1' in results['runtime/mydistro/qemux86/image'], msg="Pair not correct:%s" % results)
+        self.assertTrue('target_result3' in results['runtime/mydistro/qemux86-64/image'], msg="Pair not correct:%s" % results)
+
+    def test_regression_can_get_regression_result(self):
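+        # compare_result() should report the tests whose status changed between
+        # the base and target runs (test2 regressed, test3 now passes) together
+        # with a text summary.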
+        base_result_data = {'result': {'test1': {'status': 'PASSED'},
+                                       'test2': {'status': 'PASSED'},
+                                       'test3': {'status': 'FAILED'},
+                                       'test4': {'status': 'ERROR'},
+                                       'test5': {'status': 'SKIPPED'}}}
+        target_result_data = {'result': {'test1': {'status': 'PASSED'},
+                                         'test2': {'status': 'FAILED'},
+                                         'test3': {'status': 'PASSED'},
+                                         'test4': {'status': 'ERROR'},
+                                         'test5': {'status': 'SKIPPED'}}}
+        result, text = regression.compare_result(self.logger, "BaseTestRunName", "TargetTestRunName", base_result_data, target_result_data)
+        self.assertTrue(result['test2']['base'] == 'PASSED',
+                        msg="regression not correct:%s" % result['test2']['base'])
+        self.assertTrue(result['test2']['target'] == 'FAILED',
+                        msg="regression not correct:%s" % result['test2']['target'])
+        self.assertTrue(result['test3']['base'] == 'FAILED',
+                        msg="regression not correct:%s" % result['test3']['base'])
+        self.assertTrue(result['test3']['target'] == 'PASSED',
+                        msg="regression not correct:%s" % result['test3']['target'])
+
+    def test_merge_can_merge_results(self):
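+        # flatten_map drops the configuration keys, so all five result sets
+        # (two base plus three target) end up under the single '' key.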
+        results = {}
+        resultutils.append_resultsdata(results, ResultToolTests.base_results_data, configmap=resultutils.flatten_map)
+        resultutils.append_resultsdata(results, ResultToolTests.target_results_data, configmap=resultutils.flatten_map)
+        self.assertEqual(len(results[''].keys()), 5, msg="Flattened results not correct %s" % str(results))
+