reset upstream subtrees to HEAD

Reset the following subtrees on HEAD:
  poky: 8217b477a1(master)
  meta-xilinx: 64aa3d35ae(master)
  meta-openembedded: 0435c9e193(master)
  meta-raspberrypi: 490a4441ac(master)
  meta-security: cb6d1c85ee(master)

Squashed patches:
  meta-phosphor: drop systemd 239 patches
  meta-phosphor: mrw-api: use correct install path

Change-Id: I268e2646d9174ad305630c6bbd3fbc1a6105f43d
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/scripts/lib/resulttool/manualexecution.py b/poky/scripts/lib/resulttool/manualexecution.py
index 6487cd9..c94f981 100755
--- a/poky/scripts/lib/resulttool/manualexecution.py
+++ b/poky/scripts/lib/resulttool/manualexecution.py
@@ -24,30 +24,18 @@
     with open(file, "r") as f:
         return json.load(f)
 
-
 class ManualTestRunner(object):
-    def __init__(self):
-        self.jdata = ''
-        self.test_module = ''
-        self.test_cases_id = ''
-        self.configuration = ''
-        self.starttime = ''
-        self.result_id = ''
-        self.write_dir = ''
 
     def _get_testcases(self, file):
         self.jdata = load_json_file(file)
-        self.test_cases_id = []
         self.test_module = self.jdata[0]['test']['@alias'].split('.', 2)[0]
-        for i in self.jdata:
-            self.test_cases_id.append(i['test']['@alias'])
-    
+
     def _get_input(self, config):
         while True:
             output = input('{} = '.format(config))
-            if re.match('^[a-zA-Z0-9_-]+$', output):
+            if re.match('^[a-z0-9-.]+$', output):
                 break
-            print('Only alphanumeric and underscore/hyphen are allowed. Please try again')
+            print('Only lowercase alphanumeric, hyphen and dot are allowed. Please try again')
         return output
 
     def _create_config(self):
@@ -67,44 +55,42 @@
         extra_config = set(store_map['manual']) - set(self.configuration)
         for config in sorted(extra_config):
             print('---------------------------------------------')
-            print('This is configuration #%s. Please provide configuration value(use "None" if not applicable).'
-                  % config)
+            print('This is configuration #%s. Please provide configuration value (use "None" if not applicable).' % config)
             print('---------------------------------------------')
             value_conf = self._get_input('Configuration Value')
             print('---------------------------------------------\n')
             self.configuration[config] = value_conf
 
     def _create_result_id(self):
-        self.result_id = 'manual_' + self.test_module + '_' + self.starttime
+        self.result_id = 'manual_%s_%s' % (self.test_module, self.starttime)
 
-    def _execute_test_steps(self, test_id):
+    def _execute_test_steps(self, test):
         test_result = {}
-        total_steps = len(self.jdata[test_id]['test']['execution'].keys())
         print('------------------------------------------------------------------------')
-        print('Executing test case:' + '' '' + self.test_cases_id[test_id])
+        print('Executing test case: %s' % test['test']['@alias'])
         print('------------------------------------------------------------------------')
-        print('You have total ' + str(total_steps) + ' test steps to be executed.')
+        print('You have a total of %s test steps to be executed.' % len(test['test']['execution']))
         print('------------------------------------------------------------------------\n')
-        for step in sorted((self.jdata[test_id]['test']['execution']).keys()):
-            print('Step %s: ' % step + self.jdata[test_id]['test']['execution']['%s' % step]['action'])
-            print('Expected output: ' + self.jdata[test_id]['test']['execution']['%s' % step]['expected_results'])
-            done = input('\nPlease press ENTER when you are done to proceed to next step.\n')
+        for step, _ in sorted(test['test']['execution'].items(), key=lambda x: int(x[0])):
+            print('Step %s: %s' % (step, test['test']['execution'][step]['action']))
+            expected_output = test['test']['execution'][step]['expected_results']
+            if expected_output:
+                print('Expected output: %s' % expected_output)
         while True:
-            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n')
-            done = done.lower()
+            done = input('\nPlease provide test results: (P)assed/(F)ailed/(B)locked/(S)kipped? \n').lower()
             result_types = {'p':'PASSED',
-                                'f':'FAILED',
-                                'b':'BLOCKED',
-                                's':'SKIPPED'}
+                            'f':'FAILED',
+                            'b':'BLOCKED',
+                            's':'SKIPPED'}
             if done in result_types:
                 for r in result_types:
                     if done == r:
                         res = result_types[r]
                         if res == 'FAILED':
                             log_input = input('\nPlease enter the error and the description of the log: (Ex:log:211 Error Bitbake)\n')
-                            test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res, 'log': '%s' % log_input}})
+                            test_result.update({test['test']['@alias']: {'status': '%s' % res, 'log': '%s' % log_input}})
                         else:
-                            test_result.update({self.test_cases_id[test_id]: {'status': '%s' % res}})
+                            test_result.update({test['test']['@alias']: {'status': '%s' % res}})
                 break
             print('Invalid input!')
         return test_result
@@ -119,9 +105,9 @@
         self._create_result_id()
         self._create_write_dir()
         test_results = {}
-        print('\nTotal number of test cases in this test suite: ' + '%s\n' % len(self.jdata))
-        for i in range(0, len(self.jdata)):
-            test_result = self._execute_test_steps(i)
+        print('\nTotal number of test cases in this test suite: %s\n' % len(self.jdata))
+        for t in self.jdata:
+            test_result = self._execute_test_steps(t)
             test_results.update(test_result)
         return self.configuration, self.result_id, self.write_dir, test_results
 
@@ -129,8 +115,7 @@
     testrunner = ManualTestRunner()
     get_configuration, get_result_id, get_write_dir, get_test_results = testrunner.run_test(args.file)
     resultjsonhelper = OETestResultJSONHelper()
-    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id,
-                                          get_test_results)
+    resultjsonhelper.dump_testresult_file(get_write_dir, get_configuration, get_result_id, get_test_results)
     return 0
 
 def register_commands(subparsers):
diff --git a/poky/scripts/lib/resulttool/resultutils.py b/poky/scripts/lib/resulttool/resultutils.py
index 153f2b8..ad40ac8 100644
--- a/poky/scripts/lib/resulttool/resultutils.py
+++ b/poky/scripts/lib/resulttool/resultutils.py
@@ -15,6 +15,7 @@
 import os
 import json
 import scriptpath
+import copy
 scriptpath.add_oe_lib_path()
 
 flatten_map = {
@@ -60,12 +61,6 @@
         testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
         if testpath not in results:
             results[testpath] = {}
-        if 'ptestresult.rawlogs' in data[res]['result']:
-            del data[res]['result']['ptestresult.rawlogs']
-        if 'ptestresult.sections' in data[res]['result']:
-            for i in data[res]['result']['ptestresult.sections']:
-                if 'log' in data[res]['result']['ptestresult.sections'][i]:
-                    del data[res]['result']['ptestresult.sections'][i]['log']
         results[testpath][res] = data[res]
 
 #
@@ -93,15 +88,43 @@
                  newresults[r][i] = results[r][i]
     return newresults
 
-def save_resultsdata(results, destdir, fn="testresults.json"):
+def strip_ptestresults(results):
+    newresults = copy.deepcopy(results)
+    # strip the potentially large ptest logs from the copied results so the
+    # saved json stays small; save_resultsdata() can emit them as separate files
+    for res in newresults:
+        if 'result' not in newresults[res]:
+            continue
+        if 'ptestresult.rawlogs' in newresults[res]['result']:
+            del newresults[res]['result']['ptestresult.rawlogs']
+        if 'ptestresult.sections' in newresults[res]['result']:
+            for i in newresults[res]['result']['ptestresult.sections']:
+                if 'log' in newresults[res]['result']['ptestresult.sections'][i]:
+                    del newresults[res]['result']['ptestresult.sections'][i]['log']
+    return newresults
+
+def save_resultsdata(results, destdir, fn="testresults.json", ptestjson=False, ptestlogs=False):
     for res in results:
         if res:
             dst = destdir + "/" + res + "/" + fn
         else:
             dst = destdir + "/" + fn
         os.makedirs(os.path.dirname(dst), exist_ok=True)
+        resultsout = results[res]
+        if not ptestjson:
+            resultsout = strip_ptestresults(results[res])
         with open(dst, 'w') as f:
-            f.write(json.dumps(results[res], sort_keys=True, indent=4))
+            f.write(json.dumps(resultsout, sort_keys=True, indent=4))
+        for res2 in results[res]:
+            if ptestlogs and 'result' in results[res][res2]:
+                if 'ptestresult.rawlogs' in results[res][res2]['result']:
+                    with open(dst.replace(fn, "ptest-raw.log"), "w+") as f:
+                        f.write(results[res][res2]['result']['ptestresult.rawlogs']['log'])
+                if 'ptestresult.sections' in results[res][res2]['result']:
+                    for i in results[res][res2]['result']['ptestresult.sections']:
+                        if 'log' in results[res][res2]['result']['ptestresult.sections'][i]:
+                            with open(dst.replace(fn, "ptest-%s.log" % i), "w+") as f:
+                                f.write(results[res][res2]['result']['ptestresult.sections'][i]['log'])
 
 def git_get_result(repo, tags):
     git_objs = []
diff --git a/poky/scripts/lib/resulttool/store.py b/poky/scripts/lib/resulttool/store.py
index 5e33716..e4a0807 100644
--- a/poky/scripts/lib/resulttool/store.py
+++ b/poky/scripts/lib/resulttool/store.py
@@ -29,15 +29,18 @@
     try:
         results = {}
         logger.info('Reading files from %s' % args.source)
-        for root, dirs,  files in os.walk(args.source):
-            for name in files:
-                f = os.path.join(root, name)
-                if name == "testresults.json":
-                    resultutils.append_resultsdata(results, f)
-                elif args.all:
-                    dst = f.replace(args.source, tempdir + "/")
-                    os.makedirs(os.path.dirname(dst), exist_ok=True)
-                    shutil.copyfile(f, dst)
+        if os.path.isfile(args.source):
+            resultutils.append_resultsdata(results, args.source)
+        else:
+            for root, dirs,  files in os.walk(args.source):
+                for name in files:
+                    f = os.path.join(root, name)
+                    if name == "testresults.json":
+                        resultutils.append_resultsdata(results, f)
+                    elif args.all:
+                        dst = f.replace(args.source, tempdir + "/")
+                        os.makedirs(os.path.dirname(dst), exist_ok=True)
+                        shutil.copyfile(f, dst)
 
         revisions = {}
 
@@ -65,7 +68,7 @@
             results = revisions[r]
             keywords = {'commit': r[0], 'branch': r[1], "commit_count": r[2]}
             subprocess.check_call(["find", tempdir, "!", "-path", "./.git/*", "-delete"])
-            resultutils.save_resultsdata(results, tempdir)
+            resultutils.save_resultsdata(results, tempdir, ptestlogs=True)
 
             logger.info('Storing test result into git repository %s' % args.git_dir)