reset upstream subtrees to yocto 2.6

Reset the following subtrees to thud HEAD:

  poky: 87e3a9739d
  meta-openembedded: 6094ae18c8
  meta-security: 31dc4e7532
  meta-raspberrypi: a48743dc36
  meta-xilinx: c42016e2e6

Also re-apply backports that didn't make it into thud:
  poky:
    17726d0 systemd-systemctl-native: handle Install wildcards

  meta-openembedded:
    4321a5d libtinyxml2: update to 7.0.1
    042f0a3 libcereal: Add native and nativesdk classes
    e23284f libcereal: Allow empty package
    030e8d4 rsyslog: curl-less build with fmhttp PACKAGECONFIG
    179a1b9 gtest: update to 1.8.1

Squashed OpenBMC subtree compatibility updates:
  meta-aspeed:
    Brad Bishop (1):
          aspeed: add yocto 2.6 compatibility

  meta-ibm:
    Brad Bishop (1):
          ibm: prepare for yocto 2.6

  meta-ingrasys:
    Brad Bishop (1):
          ingrasys: set layer compatibility to yocto 2.6

  meta-openpower:
    Brad Bishop (1):
          openpower: set layer compatibility to yocto 2.6

  meta-phosphor:
    Brad Bishop (3):
          phosphor: set layer compatibility to thud
          phosphor: libgpg-error: drop patches
          phosphor: react to fitimage artifact rename

    Ed Tanous (4):
          Dropbear: upgrade options for latest upgrade
          yocto2.6: update openssl options
          busybox: remove upstream watchdog patch
          systemd: Rebase CONFIG_CGROUP_BPF patch

Change-Id: I7b1fe71cca880d0372a82d94b5fd785323e3a9e7
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/meta/lib/oe/buildhistory_analysis.py b/poky/meta/lib/oe/buildhistory_analysis.py
index b0365ab..ad7fceb 100644
--- a/poky/meta/lib/oe/buildhistory_analysis.py
+++ b/poky/meta/lib/oe/buildhistory_analysis.py
@@ -13,6 +13,7 @@
 import difflib
 import git
 import re
+import shlex
 import hashlib
 import collections
 import bb.utils
@@ -31,13 +32,6 @@
 monitor_numeric_threshold = 10
 # Image files to monitor (note that image-info.txt is handled separately)
 img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
-# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
-related_fields = {}
-related_fields['RDEPENDS'] = ['DEPENDS']
-related_fields['RRECOMMENDS'] = ['DEPENDS']
-related_fields['FILELIST'] = ['FILES']
-related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
-related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
 
 colours = {
     'colour_default': '',
@@ -67,7 +61,6 @@
         self.oldvalue = oldvalue
         self.newvalue = newvalue
         self.monitored = monitored
-        self.related = []
         self.filechanges = None
 
     def __str__(self):
@@ -123,10 +116,13 @@
                 aitems = pkglist_combine(depvera)
                 bitems = pkglist_combine(depverb)
             else:
-                aitems = self.oldvalue.split()
-                bitems = self.newvalue.split()
                 if self.fieldname == 'FILELIST':
+                    aitems = shlex.split(self.oldvalue)
+                    bitems = shlex.split(self.newvalue)
                     renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+                else:
+                    aitems = self.oldvalue.split()
+                    bitems = self.newvalue.split()
 
             removed = list(set(aitems) - set(bitems))
             added = list(set(bitems) - set(aitems))
@@ -206,13 +202,6 @@
         else:
             out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
 
-        if self.related:
-            for chg in self.related:
-                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
-                    continue
-                for line in chg._str_internal(False).splitlines():
-                    out += '\n  * %s' % line
-
         return '%s%s' % (prefix, out) if out else ''
 
 class FileChange:
@@ -424,9 +413,13 @@
                     (depvera, depverb) = compare_pkg_lists(astr, bstr)
                     if depvera == depverb:
                         continue
-                alist = astr.split()
+                if key == 'FILELIST':
+                    alist = shlex.split(astr)
+                    blist = shlex.split(bstr)
+                else:
+                    alist = astr.split()
+                    blist = bstr.split()
                 alist.sort()
-                blist = bstr.split()
                 blist.sort()
                 # We don't care about the removal of self-dependencies
                 if pkgname in alist and not pkgname in blist:
@@ -635,17 +628,6 @@
                 chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read().decode('utf-8'), '', True)
                 changes.append(chg)
 
-    # Link related changes
-    for chg in changes:
-        if chg.monitored:
-            for chg2 in changes:
-                # (Check dirname in the case of fields from recipe info files)
-                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
-                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
-                        chg.related.append(chg2)
-                    elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
-                        chg.related.append(chg2)
-
     # filter out unwanted paths
     if exclude_path:
         for chg in changes:
diff --git a/poky/meta/lib/oe/copy_buildsystem.py b/poky/meta/lib/oe/copy_buildsystem.py
index 4b94806..7cb784c 100644
--- a/poky/meta/lib/oe/copy_buildsystem.py
+++ b/poky/meta/lib/oe/copy_buildsystem.py
@@ -1,5 +1,12 @@
 # This class should provide easy access to the different aspects of the
 # buildsystem such as layers, bitbake location, etc.
+#
+# SDK_LAYERS_EXCLUDE: Layers to be excluded from the SDK layers.
+# SDK_LAYERS_EXCLUDE_PATTERN: Similar to SDK_LAYERS_EXCLUDE, but takes Python
+#                             regular expressions, separated by spaces,
+#                             e.g.: ".*-downloads closed-.*"
+#
+
 import stat
 import shutil
 
@@ -23,9 +30,12 @@
         self.context = context
         self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
         self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
+        self.layers_exclude_pattern = d.getVar('SDK_LAYERS_EXCLUDE_PATTERN')
 
     def copy_bitbake_and_layers(self, destdir, workspace_name=None):
+        import re
         # Copy in all metadata layers + bitbake (as repositories)
+        copied_corebase = None
         layers_copied = []
         bb.utils.mkdirhier(destdir)
         layers = list(self.layerdirs)
@@ -40,8 +50,17 @@
         # Exclude layers
         for layer_exclude in self.layers_exclude:
             if layer_exclude in layers:
+                bb.note('Excluded %s from sdk layers since it is in SDK_LAYERS_EXCLUDE' % layer_exclude)
                 layers.remove(layer_exclude)
 
+        if self.layers_exclude_pattern:
+            layers_cp = layers[:]
+            for pattern in self.layers_exclude_pattern.split():
+                for layer in layers_cp:
+                    if re.match(pattern, layer):
+                        bb.note('Excluded %s from sdk layers since it matched SDK_LAYERS_EXCLUDE_PATTERN' % layer)
+                        layers.remove(layer)
+
         workspace_newname = workspace_name
         if workspace_newname:
             layernames = [os.path.basename(layer) for layer in layers]
@@ -84,17 +103,18 @@
 
             layer_relative = os.path.relpath(layerdestpath,
                                              destdir)
-            layers_copied.append(layer_relative)
-
             # Treat corebase as special since it typically will contain
             # build directories or other custom items.
             if corebase == layer:
+                copied_corebase = layer_relative
                 bb.utils.mkdirhier(layerdestpath)
                 for f in corebase_files:
                     f_basename = os.path.basename(f)
                     destname = os.path.join(layerdestpath, f_basename)
                     _smart_copy(f, destname)
             else:
+                layers_copied.append(layer_relative)
+
                 if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
                     bb.note("Skipping layer %s, already handled" % layer)
                 else:
@@ -140,7 +160,7 @@
                 layers_copied.remove(layer)
                 break
 
-        return layers_copied
+        return copied_corebase, layers_copied
 
 def generate_locked_sigs(sigfile, d):
     bb.utils.mkdirhier(os.path.dirname(sigfile))
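The SDK_LAYERS_EXCLUDE_PATTERN handling added above is plain re.match()
filtering over the BBLAYERS paths. A minimal standalone sketch (the layer
paths and patterns here are made up; the real values come from the BitBake
datastore):

    import re

    layers = [
        "/srv/build/meta-oe",
        "/srv/build/meta-downloads",
        "/srv/build/closed-vendor",
    ]
    patterns = ".*-downloads .*closed-.*"

    kept = layers[:]
    for pattern in patterns.split():
        for layer in kept[:]:
            # re.match() anchors at the start of the path, so patterns usually
            # need a leading ".*" to hit something in the middle of the path.
            if re.match(pattern, layer):
                kept.remove(layer)

    print(kept)   # ['/srv/build/meta-oe']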
diff --git a/poky/meta/lib/oe/elf.py b/poky/meta/lib/oe/elf.py
new file mode 100644
index 0000000..0ed59ae
--- /dev/null
+++ b/poky/meta/lib/oe/elf.py
@@ -0,0 +1,128 @@
+def machine_dict(d):
+#           TARGET_OS  TARGET_ARCH   MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
+    machdata = {
+            "darwin9" : { 
+                        "arm" :       (40,     0,    0,          True,          32),
+                      },
+            "eabi" : {
+                        "arm" :       (40,     0,    0,          True,          32),
+                      },
+            "elf" : {
+                        "aarch64" :   (183,    0,    0,          True,          64),
+                        "aarch64_be" :(183,    0,    0,          False,         64),
+                        "i586" :      (3,      0,    0,          True,          32),
+                        "x86_64":     (62,     0,    0,          True,          64),
+                        "epiphany":   (4643,   0,    0,          True,          32),
+                        "lm32":       (138,    0,    0,          False,         32),
+                        "mips":       ( 8,     0,    0,          False,         32),
+                        "mipsel":     ( 8,     0,    0,          True,          32),
+                        "microblaze":  (189,   0,    0,          False,         32),
+                        "microblazeeb":(189,   0,    0,          False,         32),
+                        "microblazeel":(189,   0,    0,          True,          32),
+                        "powerpc":    (20,     0,    0,          False,         32),
+                        "riscv32":    (243,    0,    0,          True,          32),
+                        "riscv64":    (243,    0,    0,          True,          64),
+                      },
+            "linux" : { 
+                        "aarch64" :   (183,    0,    0,          True,          64),
+                        "aarch64_be" :(183,    0,    0,          False,         64),
+                        "arm" :       (40,    97,    0,          True,          32),
+                        "armeb":      (40,    97,    0,          False,         32),
+                        "powerpc":    (20,     0,    0,          False,         32),
+                        "powerpc64":  (21,     0,    0,          False,         64),
+                        "i386":       ( 3,     0,    0,          True,          32),
+                        "i486":       ( 3,     0,    0,          True,          32),
+                        "i586":       ( 3,     0,    0,          True,          32),
+                        "i686":       ( 3,     0,    0,          True,          32),
+                        "x86_64":     (62,     0,    0,          True,          64),
+                        "ia64":       (50,     0,    0,          True,          64),
+                        "alpha":      (36902,  0,    0,          True,          64),
+                        "hppa":       (15,     3,    0,          False,         32),
+                        "m68k":       ( 4,     0,    0,          False,         32),
+                        "mips":       ( 8,     0,    0,          False,         32),
+                        "mipsel":     ( 8,     0,    0,          True,          32),
+                        "mips64":     ( 8,     0,    0,          False,         64),
+                        "mips64el":   ( 8,     0,    0,          True,          64),
+                        "mipsisa32r6":   ( 8,  0,    0,          False,         32),
+                        "mipsisa32r6el": ( 8,  0,    0,          True,          32),
+                        "mipsisa64r6":   ( 8,  0,    0,          False,         64),
+                        "mipsisa64r6el": ( 8,  0,    0,          True,          64),
+                        "nios2":      (113,    0,    0,          True,          32),
+                        "riscv32":    (243,    0,    0,          True,          32),
+                        "riscv64":    (243,    0,    0,          True,          64),
+                        "s390":       (22,     0,    0,          False,         32),
+                        "sh4":        (42,     0,    0,          True,          32),
+                        "sparc":      ( 2,     0,    0,          False,         32),
+                        "microblaze":  (189,   0,    0,          False,         32),
+                        "microblazeeb":(189,   0,    0,          False,         32),
+                        "microblazeel":(189,   0,    0,          True,          32),
+                      },
+            "linux-musl" : { 
+                        "aarch64" :   (183,    0,    0,            True,          64),
+                        "aarch64_be" :(183,    0,    0,            False,         64),
+                        "arm" :       (  40,    97,    0,          True,          32),
+                        "armeb":      (  40,    97,    0,          False,         32),
+                        "powerpc":    (  20,     0,    0,          False,         32),
+                        "i386":       (   3,     0,    0,          True,          32),
+                        "i486":       (   3,     0,    0,          True,          32),
+                        "i586":       (   3,     0,    0,          True,          32),
+                        "i686":       (   3,     0,    0,          True,          32),
+                        "x86_64":     (  62,     0,    0,          True,          64),
+                        "mips":       (   8,     0,    0,          False,         32),
+                        "mipsel":     (   8,     0,    0,          True,          32),
+                        "mips64":     (   8,     0,    0,          False,         64),
+                        "mips64el":   (   8,     0,    0,          True,          64),
+                        "microblaze":  (189,     0,    0,          False,         32),
+                        "microblazeeb":(189,     0,    0,          False,         32),
+                        "microblazeel":(189,     0,    0,          True,          32),
+                        "riscv32":    (243,      0,    0,          True,          32),
+                        "riscv64":    (243,      0,    0,          True,          64),
+                        "sh4":        (  42,     0,    0,          True,          32),
+                      },
+            "uclinux-uclibc" : {
+                        "bfin":       ( 106,     0,    0,          True,         32),
+                      }, 
+            "linux-gnueabi" : {
+                        "arm" :       (40,     0,    0,          True,          32),
+                        "armeb" :     (40,     0,    0,          False,         32),
+                      },
+            "linux-musleabi" : {
+                        "arm" :       (40,     0,    0,          True,          32),
+                        "armeb" :     (40,     0,    0,          False,         32),
+                      },
+            "linux-gnuspe" : {
+                        "powerpc":    (20,     0,    0,          False,         32),
+                      },
+            "linux-muslspe" : {
+                        "powerpc":    (20,     0,    0,          False,         32),
+                      },
+            "linux-gnu" :       {
+                        "powerpc":    (20,     0,    0,          False,         32),
+                        "sh4":        (42,     0,    0,          True,          32),
+                      },
+            "linux-gnu_ilp32" :     {
+                        "aarch64" :   (183,    0,    0,          True,          32),
+                      },
+            "linux-gnux32" :       {
+                        "x86_64":     (62,     0,    0,          True,          32),
+                      },
+            "linux-muslx32" :       {
+                        "x86_64":     (62,     0,    0,          True,          32),
+                      },
+            "linux-gnun32" :       {
+                        "mips64":       ( 8,     0,    0,          False,         32),
+                        "mips64el":     ( 8,     0,    0,          True,          32),
+                        "mipsisa64r6":  ( 8,     0,    0,          False,         32),
+                        "mipsisa64r6el":( 8,     0,    0,          True,          32),
+                      },
+        }
+
+    # Add in any extra user supplied data which may come from a BSP layer, removing the
+    # need to always change this class directly
+    extra_machdata = (d and d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
+    for m in extra_machdata:
+        call = m + "(machdata, d)"
+        locs = { "machdata" : machdata, "d" : d}
+        machdata = bb.utils.better_eval(call, locs)
+
+    return machdata
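The machine_dict() table above is keyed by TARGET_OS and then TARGET_ARCH, and
each entry is an (ELF machine, OSABI, ABI version, little-endian flag, word
size) tuple. A small illustration of how a caller would unpack it, using an
excerpt of the table rather than the module itself:

    # Excerpt of the table above, for illustration only.
    machdata = {
        "linux": {
            "x86_64":  (62,  0, 0, True, 64),
            "aarch64": (183, 0, 0, True, 64),
        },
    }

    target_os, target_arch = "linux", "x86_64"
    machine, osabi, abiversion, littleendian, bits = machdata[target_os][target_arch]
    print(machine, bits, "LE" if littleendian else "BE")   # 62 64 LE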
diff --git a/poky/meta/lib/oe/gpg_sign.py b/poky/meta/lib/oe/gpg_sign.py
index b172729..ccd5aee 100644
--- a/poky/meta/lib/oe/gpg_sign.py
+++ b/poky/meta/lib/oe/gpg_sign.py
@@ -3,6 +3,8 @@
 
 import bb
 import oe.utils
+import subprocess
+import shlex
 
 class LocalSigner(object):
     """Class for handling local (on the build host) signing"""
@@ -23,10 +25,7 @@
         if armor:
             cmd += "--armor "
         cmd += keyid
-        status, output = oe.utils.getstatusoutput(cmd)
-        if status:
-            raise bb.build.FuncFailed('Failed to export gpg public key (%s): %s' %
-                                      (keyid, output))
+        subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT)
 
     def sign_rpms(self, files, keyid, passphrase, digest, sign_chunk, fsk=None, fsk_password=None):
         """Sign RPM files"""
@@ -48,13 +47,10 @@
 
         # Sign in chunks
         for i in range(0, len(files), sign_chunk):
-            status, output = oe.utils.getstatusoutput(cmd + ' '.join(files[i:i+sign_chunk]))
-            if status:
-                raise bb.build.FuncFailed("Failed to sign RPM packages: %s" % output)
+            subprocess.check_output(shlex.split(cmd + ' '.join(files[i:i+sign_chunk])), stderr=subprocess.STDOUT)
 
     def detach_sign(self, input_file, keyid, passphrase_file, passphrase=None, armor=True):
         """Create a detached signature of a file"""
-        import subprocess
 
         if passphrase_file and passphrase:
             raise Exception("You should use either passphrase_file of passphrase, not both")
@@ -100,7 +96,6 @@
 
     def get_gpg_version(self):
         """Return the gpg version as a tuple of ints"""
-        import subprocess
         try:
             ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8")
             return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
@@ -114,7 +109,7 @@
         if self.gpg_path:
             cmd += "--homedir %s " % self.gpg_path
         cmd += sig_file
-        status, _ = oe.utils.getstatusoutput(cmd)
+        status = subprocess.call(shlex.split(cmd))
         ret = False if status else True
         return ret
 
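The conversion above from oe.utils.getstatusoutput() to the standard library
follows one pattern throughout: shlex.split() the command string, then let
subprocess raise (or return a status) instead of checking the output manually.
A self-contained sketch with an illustrative command, not the gpg invocations
themselves:

    import shlex
    import subprocess
    import sys

    # Illustrative command only; the real code builds gpg command lines.
    cmd = "%s --version" % shlex.quote(sys.executable)

    # check_output() raises CalledProcessError on a non-zero exit code, which
    # replaces the old manual status/output check.
    out = subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT)
    print(out.decode("utf-8").strip())

    # Where only the exit status matters (as in verify() above), call() is enough.
    status = subprocess.call(shlex.split(cmd))
    print("ok" if status == 0 else "failed with %d" % status)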
diff --git a/poky/meta/lib/oe/maketype.py b/poky/meta/lib/oe/maketype.py
index f88981d..c36e7b5 100644
--- a/poky/meta/lib/oe/maketype.py
+++ b/poky/meta/lib/oe/maketype.py
@@ -7,7 +7,12 @@
 
 import inspect
 import oe.types as types
-import collections
+try:
+    # Python 3.7+
+    from collections.abc import Callable
+except ImportError:
+    # Python < 3.7
+    from collections import Callable
 
 available_types = {}
 
@@ -96,7 +101,7 @@
         continue
 
     obj = getattr(types, name)
-    if not isinstance(obj, collections.Callable):
+    if not isinstance(obj, Callable):
         continue
 
     register(name, obj)
diff --git a/poky/meta/lib/oe/package.py b/poky/meta/lib/oe/package.py
index 4f3e21a..efd36b3 100644
--- a/poky/meta/lib/oe/package.py
+++ b/poky/meta/lib/oe/package.py
@@ -1,15 +1,17 @@
+import stat
+import mmap
+import subprocess
+
 def runstrip(arg):
     # Function to strip a single file, called from split_and_strip_files below
     # A working 'file' (one which works on the target architecture)
     #
-    # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+    # The elftype is a bit pattern (explained in is_elf below) to tell
     # us what type of file we're processing...
     # 4 - executable
     # 8 - shared library
     # 16 - kernel module
 
-    import stat, subprocess
-
     (file, elftype, strip) = arg
 
     newmode = None
@@ -19,11 +21,15 @@
         os.chmod(file, newmode)
 
     stripcmd = [strip]
-
+    skip_strip = False
     # kernel module    
     if elftype & 16:
-        stripcmd.extend(["--strip-debug", "--remove-section=.comment",
-            "--remove-section=.note", "--preserve-dates"])
+        if is_kernel_module_signed(file):
+            bb.debug(1, "Skip strip on signed module %s" % file)
+            skip_strip = True
+        else:
+            stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+                "--remove-section=.note", "--preserve-dates"])
     # .so and shared library
     elif ".so" in file and elftype & 8:
         stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
@@ -34,18 +40,59 @@
     stripcmd.append(file)
     bb.debug(1, "runstrip: %s" % stripcmd)
 
-    try:
+    if not skip_strip:
         output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError as e:
-        bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
 
     if newmode:
         os.chmod(file, origmode)
 
-    return
+# Detect .ko module by searching for "vermagic=" string
+def is_kernel_module(path):
+    with open(path) as f:
+        return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
 
+# Detect if .ko module is signed
+def is_kernel_module_signed(path):
+    with open(path, "rb") as f:
+        f.seek(-28, 2)
+        module_tail = f.read()
+        return "Module signature appended" in "".join(chr(c) for c in bytearray(module_tail))
 
-def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, qa_already_stripped=False):
+# Return type (bits):
+# 0 - not elf
+# 1 - ELF
+# 2 - stripped
+# 4 - executable
+# 8 - shared library
+# 16 - kernel module
+def is_elf(path):
+    exec_type = 0
+    result = subprocess.check_output(["file", "-b", path], stderr=subprocess.STDOUT).decode("utf-8")
+
+    if "ELF" in result:
+        exec_type |= 1
+        if "not stripped" not in result:
+            exec_type |= 2
+        if "executable" in result:
+            exec_type |= 4
+        if "shared" in result:
+            exec_type |= 8
+        if "relocatable" in result:
+            if path.endswith(".ko") and path.find("/lib/modules/") != -1 and is_kernel_module(path):
+                exec_type |= 16
+    return (path, exec_type)
+
+def is_static_lib(path):
+    if path.endswith('.a') and not os.path.islink(path):
+        with open(path, 'rb') as fh:
+            # The magic must include the first slash to avoid
+            # matching golang static libraries
+            magic = b'!<arch>\x0a/'
+            start = fh.read(len(magic))
+            return start == magic
+    return False
+
+def strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d, qa_already_stripped=False):
     """
     Strip executable code (like executables, shared libraries) _in_place_
     - Based on sysroot_strip in staging.bbclass
@@ -56,39 +103,7 @@
     :param qa_already_stripped: Set to True if already-stripped' in ${INSANE_SKIP}
     This is for proper logging and messages only.
     """
-    import stat, errno, oe.path, oe.utils, mmap
-
-    # Detect .ko module by searching for "vermagic=" string
-    def is_kernel_module(path):
-        with open(path) as f:
-            return mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ).find(b"vermagic=") >= 0
-
-    # Return type (bits):
-    # 0 - not elf
-    # 1 - ELF
-    # 2 - stripped
-    # 4 - executable
-    # 8 - shared library
-    # 16 - kernel module
-    def is_elf(path):
-        exec_type = 0
-        ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
-
-        if ret:
-            bb.error("split_and_strip_files: 'file %s' failed" % path)
-            return exec_type
-
-        if "ELF" in result:
-            exec_type |= 1
-            if "not stripped" not in result:
-                exec_type |= 2
-            if "executable" in result:
-                exec_type |= 4
-            if "shared" in result:
-                exec_type |= 8
-            if "relocatable" in result and is_kernel_module(path):
-                exec_type |= 16
-        return exec_type
+    import stat, errno, oe.path, oe.utils
 
     elffiles = {}
     inodes = {}
@@ -98,6 +113,8 @@
     #
     # First lets figure out all of the files we may have to process
     #
+    checkelf = []
+    inodecache = {}
     for root, dirs, files in os.walk(dstdir):
         for f in files:
             file = os.path.join(root, f)
@@ -123,7 +140,11 @@
 
                 # It's a file (or hardlink), not a link
                 # ...but is it ELF, and is it already stripped?
-                elf_file = is_elf(file)
+                checkelf.append(file)
+                inodecache[file] = s.st_ino
+    results = oe.utils.multiprocess_launch(is_elf, checkelf, d)
+    for (file, elf_file) in results:
+                #elf_file = is_elf(file)
                 if elf_file & 1:
                     if elf_file & 2:
                         if qa_already_stripped:
@@ -132,13 +153,13 @@
                             bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dstdir):], pn))
                         continue
 
-                    if s.st_ino in inodes:
+                    if inodecache[file] in inodes:
                         os.unlink(file)
-                        os.link(inodes[s.st_ino], file)
+                        os.link(inodes[inodecache[file]], file)
                     else:
                         # break hardlinks so that we do not strip the original.
-                        inodes[s.st_ino] = file
-                        bb.utils.copyfile(file, file)
+                        inodes[inodecache[file]] = file
+                        bb.utils.break_hardlinks(file)
                         elffiles[file] = elf_file
 
     #
@@ -149,8 +170,7 @@
         elf_file = int(elffiles[file])
         sfiles.append((file, elf_file, strip_cmd))
 
-    oe.utils.multiprocess_exec(sfiles, runstrip)
-
+    oe.utils.multiprocess_launch(runstrip, sfiles, d)
 
 
 def file_translate(file):
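For reference, the exec_type value that is_elf() above returns next to the
path is a plain bit mask (1 = ELF, 2 = stripped, 4 = executable, 8 = shared
library, 16 = kernel module). A minimal sketch of decoding it:

    # Bit values mirror the comment block above is_elf().
    ELF, STRIPPED, EXECUTABLE, SHARED, KMODULE = 1, 2, 4, 8, 16

    def describe(exec_type):
        if not exec_type & ELF:
            return "not an ELF file"
        parts = []
        if exec_type & EXECUTABLE:
            parts.append("executable")
        if exec_type & SHARED:
            parts.append("shared library")
        if exec_type & KMODULE:
            parts.append("kernel module")
        parts.append("stripped" if exec_type & STRIPPED else "not stripped")
        return ", ".join(parts)

    print(describe(1 | 4))       # executable, not stripped
    print(describe(1 | 8 | 2))   # shared library, stripped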
diff --git a/poky/meta/lib/oe/package_manager.py b/poky/meta/lib/oe/package_manager.py
index 2d8aeba..882e7c4 100644
--- a/poky/meta/lib/oe/package_manager.py
+++ b/poky/meta/lib/oe/package_manager.py
@@ -3,7 +3,6 @@
 import glob
 import subprocess
 import shutil
-import multiprocessing
 import re
 import collections
 import bb
@@ -13,6 +12,7 @@
 import string
 from oe.gpg_sign import get_signer
 import hashlib
+import fnmatch
 
 # this can be used by all PM backends to create the index files in parallel
 def create_index(arg):
@@ -84,12 +84,55 @@
 
     return output
 
-# Note: this should be bb.fatal in the future.
-def failed_postinsts_warn(pkgs, log_path):
-    bb.warn("""Intentionally failing postinstall scriptlets of %s to defer them to first boot is deprecated. Please place them into pkg_postinst_ontarget_${PN} ().
-If deferring to first boot wasn't the intent, then scriptlet failure may mean an issue in the recipe, or a regression elsewhere.
+def failed_postinsts_abort(pkgs, log_path):
+    bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
+then please place them into pkg_postinst_ontarget_${PN} ().
+Deferring to first boot via 'exit 1' is no longer supported.
 Details of the failure are in %s.""" %(pkgs, log_path))
 
+def generate_locale_archive(d, rootfs, target_arch, localedir):
+    # Pretty sure we don't need this for locale archive generation but
+    # keeping it to be safe...
+    locale_arch_options = { \
+        "arm": ["--uint32-align=4", "--little-endian"],
+        "armeb": ["--uint32-align=4", "--big-endian"],
+        "aarch64": ["--uint32-align=4", "--little-endian"],
+        "aarch64_be": ["--uint32-align=4", "--big-endian"],
+        "sh4": ["--uint32-align=4", "--big-endian"],
+        "powerpc": ["--uint32-align=4", "--big-endian"],
+        "powerpc64": ["--uint32-align=4", "--big-endian"],
+        "mips": ["--uint32-align=4", "--big-endian"],
+        "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
+        "mips64": ["--uint32-align=4", "--big-endian"],
+        "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
+        "mipsel": ["--uint32-align=4", "--little-endian"],
+        "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
+        "mips64el": ["--uint32-align=4", "--little-endian"],
+        "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
+        "riscv64": ["--uint32-align=4", "--little-endian"],
+        "riscv32": ["--uint32-align=4", "--little-endian"],
+        "i586": ["--uint32-align=4", "--little-endian"],
+        "i686": ["--uint32-align=4", "--little-endian"],
+        "x86_64": ["--uint32-align=4", "--little-endian"]
+    }
+    if target_arch in locale_arch_options:
+        arch_options = locale_arch_options[target_arch]
+    else:
+        bb.error("locale_arch_options not found for target_arch=" + target_arch)
+        bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
+
+    # Need to set this so cross-localedef knows where the archive is
+    env = dict(os.environ)
+    env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
+
+    for name in os.listdir(localedir):
+        path = os.path.join(localedir, name)
+        if os.path.isdir(path):
+            cmd = ["cross-localedef", "--verbose"]
+            cmd += arch_options
+            cmd += ["--add-to-archive", path]
+            subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
+
 class Indexer(object, metaclass=ABCMeta):
     def __init__(self, d, deploy_dir):
         self.d = d
@@ -177,7 +220,7 @@
             bb.note("There are no packages in %s!" % self.deploy_dir)
             return
 
-        oe.utils.multiprocess_exec(index_cmds, create_index)
+        oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
 
         if signer:
             feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
@@ -258,7 +301,7 @@
             bb.note("There are no packages in %s" % self.deploy_dir)
             return
 
-        oe.utils.multiprocess_exec(index_cmds, create_index)
+        oe.utils.multiprocess_launch(create_index, index_cmds, self.d)
         if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
             raise NotImplementedError('Package feed signing not implementd for dpkg')
 
@@ -336,17 +379,24 @@
 
     def _initialize_intercepts(self):
         bb.note("Initializing intercept dir for %s" % self.target_rootfs)
-        postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
-        if not postinst_intercepts_dir:
-            postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
         # As there might be more than one instance of PackageManager operating at the same time
         # we need to isolate the intercept_scripts directories from each other,
         # hence the ugly hash digest in dir name.
-        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
-                                      "intercept_scripts-%s" %(hashlib.sha256(self.target_rootfs.encode()).hexdigest()) )
+        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
+                                           (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))
 
+        postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
+        if not postinst_intercepts:
+            postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
+            if not postinst_intercepts_path:
+                postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+            postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)
+
+        bb.debug(1, 'Collected intercepts:\n%s' % ''.join('  %s\n' % i for i in postinst_intercepts))
         bb.utils.remove(self.intercepts_dir, True)
-        shutil.copytree(postinst_intercepts_dir, self.intercepts_dir)
+        bb.utils.mkdirhier(self.intercepts_dir)
+        for intercept in postinst_intercepts:
+            bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))
 
     @abstractmethod
     def _handle_intercept_failure(self, failed_script):
@@ -370,7 +420,7 @@
                 self._handle_intercept_failure(registered_pkgs)
 
 
-    def run_intercepts(self):
+    def run_intercepts(self, populate_sdk=None):
         intercepts_dir = self.intercepts_dir
 
         bb.note("Running intercept scripts:")
@@ -382,7 +432,8 @@
             if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                 continue
 
-            if script == "delay_to_first_boot":
+            # we do not want to run any multilib variant of this
+            if script.startswith("delay_to_first_boot"):
                 self._postpone_to_first_boot(script_full)
                 continue
 
@@ -392,9 +443,22 @@
                 output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
                 if output: bb.note(output.decode("utf-8"))
             except subprocess.CalledProcessError as e:
-                bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                 bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
-                self._postpone_to_first_boot(script_full)
+                if populate_sdk == 'host':
+                    bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+                elif populate_sdk == 'target':
+                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+                        bb.warn("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+                    else:
+                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+                else:
+                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
+                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+                        self._postpone_to_first_boot(script_full)
+                    else:
+                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
 
     @abstractmethod
     def update(self):
@@ -523,6 +587,13 @@
                          "'%s' returned %d:\n%s" %
                          (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
 
+        target_arch = self.d.getVar('TARGET_ARCH')
+        localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
+        if os.path.exists(localedir) and os.listdir(localedir):
+            generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
+            # And now delete the binary locales
+            self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)
+
     def deploy_dir_lock(self):
         if self.deploy_dir is None:
             raise RuntimeError("deploy_dir is not set!")
@@ -559,7 +630,7 @@
             return res
         return _append(uris, base_paths)
 
-def create_packages_dir(d, rpm_repo_dir, deploydir, taskname, filterbydependencies):
+def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
     """
     Go through our do_package_write_X dependencies and hardlink the packages we depend
     upon into the repo directory. This prevents us seeing other packages that may
@@ -574,15 +645,13 @@
     seendirs = set()
     multilibs = {}
    
-    rpm_subrepo_dir = oe.path.join(rpm_repo_dir, "rpm")
-
-    bb.utils.remove(rpm_subrepo_dir, recurse=True)
-    bb.utils.mkdirhier(rpm_subrepo_dir)
+    bb.utils.remove(subrepo_dir, recurse=True)
+    bb.utils.mkdirhier(subrepo_dir)
 
     # Detect bitbake -b usage
     nodeps = d.getVar("BB_LIMITEDDEPS") or False
     if nodeps or not filterbydependencies:
-        oe.path.symlink(deploydir, rpm_subrepo_dir, True)
+        oe.path.symlink(deploydir, subrepo_dir, True)
         return
 
     start = None
@@ -593,24 +662,24 @@
             break
     if start is None:
         bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
-    rpmdeps = set()
+    pkgdeps = set()
     start = [start]
     seen = set(start)
-    # Support direct dependencies (do_rootfs -> rpms)
-    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> rpms)
+    # Support direct dependencies (do_rootfs -> do_package_write_X)
+    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
     while start:
         next = []
         for dep2 in start:
             for dep in taskdepdata[dep2][3]:
                 if taskdepdata[dep][0] != pn:
                     if "do_" + taskname in dep:
-                        rpmdeps.add(dep)
+                        pkgdeps.add(dep)
                 elif dep not in seen:
                     next.append(dep)
                     seen.add(dep)
         start = next
 
-    for dep in rpmdeps:
+    for dep in pkgdeps:
         c = taskdepdata[dep][0]
         manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
         if not manifest:
@@ -620,8 +689,12 @@
         with open(manifest, "r") as f:
             for l in f:
                 l = l.strip()
-                dest = l.replace(deploydir, "")
-                dest = rpm_subrepo_dir + dest
+                deploydir = os.path.normpath(deploydir)
+                if bb.data.inherits_class('packagefeed-stability', d):
+                    dest = l.replace(deploydir + "-prediff", "")
+                else:
+                    dest = l.replace(deploydir, "")
+                dest = subrepo_dir + dest
                 if l.endswith("/"):
                     if dest not in seendirs:
                         bb.utils.mkdirhier(dest)
@@ -663,12 +736,12 @@
             self.primary_arch = self.d.getVar('MACHINE_ARCH')
 
         self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
-        create_packages_dir(self.d, self.rpm_repo_dir, d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+        create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
 
         self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
         if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
             bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
-        self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
+        self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
         self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                                self.task_name)
         if not os.path.exists(self.d.expand('${T}/saved')):
@@ -697,7 +770,9 @@
         rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
         bb.utils.mkdirhier(platformconfdir)
         open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
-        open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+        with open(rpmrcconfdir + "rpmrc", 'w') as f:
+            f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+            f.write("buildarch_compat: %s: noarch\n" % self.primary_arch)
 
         open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
         if self.d.getVar('RPM_PREFER_ELF_ARCH'):
@@ -789,13 +864,12 @@
                 failed_scriptlets_pkgnames[line.split()[-1]] = True
 
         if len(failed_scriptlets_pkgnames) > 0:
-            failed_postinsts_warn(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
-        for pkg in failed_scriptlets_pkgnames.keys():
-            self.save_rpmpostinst(pkg)
+            failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
 
     def remove(self, pkgs, with_dependencies = True):
-        if len(pkgs) == 0:
+        if not pkgs:
             return
+
         self._prepare_pkg_transaction()
 
         if with_dependencies:
@@ -832,7 +906,10 @@
         for i in self.packaging_data_dirs:
             source_dir = oe.path.join(self.target_rootfs, i)
             target_dir = oe.path.join(self.saved_packaging_data, i)
-            shutil.copytree(source_dir, target_dir, symlinks=True)
+            if os.path.isdir(source_dir):
+                shutil.copytree(source_dir, target_dir, symlinks=True)
+            elif os.path.isfile(source_dir):
+                shutil.copy2(source_dir, target_dir)
 
     def recovery_packaging_data(self):
         # Move the rpmlib back
@@ -842,9 +919,10 @@
                 if os.path.exists(target_dir):
                     bb.utils.remove(target_dir, True)
                 source_dir = oe.path.join(self.saved_packaging_data, i)
-                shutil.copytree(source_dir,
-                            target_dir,
-                            symlinks=True)
+                if os.path.isdir(source_dir):
+                    shutil.copytree(source_dir, target_dir, symlinks=True)
+                elif os.path.isfile(source_dir):
+                    shutil.copy2(source_dir, target_dir)
 
     def list_installed(self):
         output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
@@ -884,7 +962,7 @@
         os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
 
         dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
-        standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
+        standard_dnf_args = ["-v", "--rpmverbosity=debug", "-y",
                              "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
                              "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
                              "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
@@ -896,7 +974,7 @@
         try:
             output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
             if print_output:
-                bb.note(output)
+                bb.debug(1, output)
             return output
         except subprocess.CalledProcessError as e:
             if print_output:
@@ -1060,19 +1138,22 @@
         self.mark_packages("unpacked", registered_pkgs.split())
 
 class OpkgPM(OpkgDpkgPM):
-    def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
+    def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True):
         super(OpkgPM, self).__init__(d, target_rootfs)
 
         self.config_file = config_file
         self.pkg_archs = archs
         self.task_name = task_name
 
-        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
+        self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir)
         self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
         self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
         self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
         self.opkg_args += self.d.getVar("OPKG_ARGS")
 
+        if prepare_index:
+            create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies)
+
         opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
         if opkg_lib_dir[0] == "/":
             opkg_lib_dir = opkg_lib_dir[1:]
@@ -1245,7 +1326,11 @@
         if not pkgs:
             return
 
-        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+        cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
+        for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
+            cmd += " --add-exclude %s" % exclude
+        cmd += " install "
+        cmd += " ".join(pkgs)
 
         os.environ['D'] = self.target_rootfs
         os.environ['OFFLINE_ROOT'] = self.target_rootfs
@@ -1265,13 +1350,16 @@
                     bb.warn(line)
                     failed_pkgs.append(line.split(".")[0])
             if failed_pkgs:
-                failed_postinsts_warn(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
+                failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
         except subprocess.CalledProcessError as e:
             (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
                                               "Command '%s' returned %d:\n%s" %
                                               (cmd, e.returncode, e.output.decode("utf-8")))
 
     def remove(self, pkgs, with_dependencies=True):
+        if not pkgs:
+            return
+
         if with_dependencies:
             cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \
                 (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
@@ -1437,9 +1525,12 @@
         return tmp_dir
 
 class DpkgPM(OpkgDpkgPM):
-    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
+    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True):
         super(DpkgPM, self).__init__(d, target_rootfs)
-        self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
+        self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir)
+
+        create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies)
+
         if apt_conf_dir is None:
             self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
         else:
@@ -1515,7 +1606,6 @@
         os.environ['INTERCEPT_DIR'] = self.intercepts_dir
         os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
 
-        failed_pkgs = []
         for pkg_name in installed_pkgs:
             for control_script in control_scripts:
                 p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
@@ -1530,12 +1620,7 @@
                         bb.warn("%s for package %s failed with %d:\n%s" %
                                 (control_script.name, pkg_name, e.returncode,
                                     e.output.decode("utf-8")))
-                        failed_postinsts_warn([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
-                        failed_pkgs.append(pkg_name)
-                        break
-
-        if len(failed_pkgs):
-            self.mark_packages("unpacked", failed_pkgs)
+                        failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
 
     def update(self):
         os.environ['APT_CONFIG'] = self.apt_conf_file
@@ -1585,6 +1670,9 @@
 
 
     def remove(self, pkgs, with_dependencies=True):
+        if not pkgs:
+            return
+
         if with_dependencies:
             os.environ['APT_CONFIG'] = self.apt_conf_file
             cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
diff --git a/poky/meta/lib/oe/patch.py b/poky/meta/lib/oe/patch.py
index af7aa52..e0f0604 100644
--- a/poky/meta/lib/oe/patch.py
+++ b/poky/meta/lib/oe/patch.py
@@ -21,6 +21,7 @@
 
 def runcmd(args, dir = None):
     import pipes
+    import subprocess
 
     if dir:
         olddir = os.path.abspath(os.curdir)
@@ -33,7 +34,7 @@
         args = [ pipes.quote(str(arg)) for arg in args ]
         cmd = " ".join(args)
         # print("cmd: %s" % cmd)
-        (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+        (exitstatus, output) = subprocess.getstatusoutput(cmd)
         if exitstatus != 0:
             raise CmdError(cmd, exitstatus >> 8, output)
         if " fuzz " in output:
diff --git a/poky/meta/lib/oe/path.py b/poky/meta/lib/oe/path.py
index 76c58fa..1e24d05 100644
--- a/poky/meta/lib/oe/path.py
+++ b/poky/meta/lib/oe/path.py
@@ -86,7 +86,7 @@
     # This way we also preserve hardlinks between files in the tree.
 
     bb.utils.mkdirhier(dst)
-    cmd = "tar --xattrs --xattrs-include='*' -cf - -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
+    cmd = "tar --xattrs --xattrs-include='*' -cf - -S -C %s -p . | tar --xattrs --xattrs-include='*' -xf - -C %s" % (src, dst)
     subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
 
 def copyhardlinktree(src, dst):
@@ -98,7 +98,7 @@
     if (os.stat(src).st_dev ==  os.stat(dst).st_dev):
         # Need to copy directories only with tar first since cp will error if two 
         # writers try and create a directory at the same time
-        cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
+        cmd = "cd %s; find . -type d -print | tar --xattrs --xattrs-include='*' -cf - -S -C %s -p --no-recursion --files-from - | tar --xattrs --xattrs-include='*' -xhf - -C %s" % (src, src, dst)
         subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
         source = ''
         if os.path.isdir(src):
@@ -259,3 +259,37 @@
         if not path_abs.startswith(possible_parent_abs):
             return False
     return True
+
+def which_wild(pathname, path=None, mode=os.F_OK, *, reverse=False, candidates=False):
+    """Search a search path for pathname, supporting wildcards.
+
+    Return all paths in the specified search path that match the wildcard
+    pattern in pathname, returning only the first one encountered for each
+    file. If candidates is True, information on all potential candidate
+    paths is included.
+    """
+    paths = (path or os.environ.get('PATH', os.defpath)).split(':')
+    if reverse:
+        paths.reverse()
+
+    seen, files = set(), []
+    for index, element in enumerate(paths):
+        if not os.path.isabs(element):
+            element = os.path.abspath(element)
+
+        candidate = os.path.join(element, pathname)
+        globbed = glob.glob(candidate)
+        if globbed:
+            for found_path in sorted(globbed):
+                if not os.access(found_path, mode):
+                    continue
+                rel = os.path.relpath(found_path, element)
+                if rel not in seen:
+                    seen.add(rel)
+                    if candidates:
+                        files.append((found_path, [os.path.join(p, rel) for p in paths[:index+1]]))
+                    else:
+                        files.append(found_path)
+
+    return files
+
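which_wild() above is what the package manager change uses to collect postinst
intercept scripts (oe.path.which_wild('*', postinst_intercepts_path)). A
simplified standalone re-sketch of its search order, leaving out the mode,
reverse and candidates handling:

    import glob
    import os

    def which_wild_sketch(pathname, search_path):
        # First match per relative name wins, in the order the directories
        # appear in the colon-separated search_path.
        seen, found = set(), []
        for element in search_path.split(":"):
            element = os.path.abspath(element)
            for hit in sorted(glob.glob(os.path.join(element, pathname))):
                rel = os.path.relpath(hit, element)
                if rel not in seen:
                    seen.add(rel)
                    found.append(hit)
        return found

    # e.g. which_wild_sketch('*', '/some/layer/scripts:/other/layer/scripts')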
diff --git a/poky/meta/lib/oe/qa.py b/poky/meta/lib/oe/qa.py
index 3231e60..59c72ce 100644
--- a/poky/meta/lib/oe/qa.py
+++ b/poky/meta/lib/oe/qa.py
@@ -158,7 +158,8 @@
             0x2A: "SuperH",
             0x32: "IA-64",
             0x3E: "x86-64",
-            0xB7: "AArch64"
+            0xB7: "AArch64",
+            0xF7: "BPF"
         }[machine]
     except:
         return "Unknown (%s)" % repr(machine)
diff --git a/poky/meta/lib/oe/rootfs.py b/poky/meta/lib/oe/rootfs.py
index c156607..e5512d0 100644
--- a/poky/meta/lib/oe/rootfs.py
+++ b/poky/meta/lib/oe/rootfs.py
@@ -144,6 +144,20 @@
         bb.note("  Install complementary '*-dbg' packages...")
         self.pm.install_complementary('*-dbg')
 
+        if self.d.getVar('PACKAGE_DEBUG_SPLIT_STYLE') == 'debug-with-srcpkg':
+            bb.note("  Install complementary '*-src' packages...")
+            self.pm.install_complementary('*-src')
+
+        """
+        Install additional debug packages: packages which are not automatically
+        installed as complementary packages of the standard ones, e.g. debug
+        packages for static libraries.
+        """
+        extra_debug_pkgs = self.d.getVar('IMAGE_INSTALL_DEBUGFS')
+        if extra_debug_pkgs:
+            bb.note("  Install extra debug packages...")
+            self.pm.install(extra_debug_pkgs.split(), True)
+
         bb.note("  Rename debug rootfs...")
         try:
             shutil.rmtree(self.image_rootfs + '-dbg')
@@ -472,7 +486,8 @@
         self._log_check_error()
 
     def _cleanup(self):
-        self.pm._invoke_dnf(["clean", "all"])
+        if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d):
+            self.pm._invoke_dnf(["clean", "all"])
 
 
 class DpkgOpkgRootfs(Rootfs):
@@ -560,6 +575,9 @@
         return pkg_list
 
     def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+        if bb.utils.contains("IMAGE_FEATURES", "package-management",
+                         True, False, self.d):
+            return
         num = 0
         for p in self._get_delayed_postinsts():
             bb.utils.mkdirhier(dst_postinst_dir)
@@ -782,7 +800,7 @@
             ml_opkg_conf = os.path.join(ml_temp,
                                         variant + "-" + os.path.basename(self.opkg_conf))
 
-            ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
+            ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False)
 
             ml_pm.update()
             ml_pm.install(pkgs)
diff --git a/poky/meta/lib/oe/sdk.py b/poky/meta/lib/oe/sdk.py
index d6a5033..153b07d 100644
--- a/poky/meta/lib/oe/sdk.py
+++ b/poky/meta/lib/oe/sdk.py
@@ -7,52 +7,6 @@
 import glob
 import traceback
 
-def generate_locale_archive(d, rootfs):
-    # Pretty sure we don't need this for SDK archive generation but
-    # keeping it to be safe...
-    target_arch = d.getVar('SDK_ARCH')
-    locale_arch_options = { \
-        "arm": ["--uint32-align=4", "--little-endian"],
-        "armeb": ["--uint32-align=4", "--big-endian"],
-        "aarch64": ["--uint32-align=4", "--little-endian"],
-        "aarch64_be": ["--uint32-align=4", "--big-endian"],
-        "sh4": ["--uint32-align=4", "--big-endian"],
-        "powerpc": ["--uint32-align=4", "--big-endian"],
-        "powerpc64": ["--uint32-align=4", "--big-endian"],
-        "mips": ["--uint32-align=4", "--big-endian"],
-        "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
-        "mips64": ["--uint32-align=4", "--big-endian"],
-        "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
-        "mipsel": ["--uint32-align=4", "--little-endian"],
-        "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
-        "mips64el": ["--uint32-align=4", "--little-endian"],
-        "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
-        "i586": ["--uint32-align=4", "--little-endian"],
-        "i686": ["--uint32-align=4", "--little-endian"],
-        "x86_64": ["--uint32-align=4", "--little-endian"]
-    }
-    if target_arch in locale_arch_options:
-        arch_options = locale_arch_options[target_arch]
-    else:
-        bb.error("locale_arch_options not found for target_arch=" + target_arch)
-        bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")
-
-    localedir = oe.path.join(rootfs, d.getVar("libdir_nativesdk"), "locale")
-    # Need to set this so cross-localedef knows where the archive is
-    env = dict(os.environ)
-    env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")
-
-    for name in os.listdir(localedir):
-        path = os.path.join(localedir, name)
-        if os.path.isdir(path):
-            try:
-                cmd = ["cross-localedef", "--verbose"]
-                cmd += arch_options
-                cmd += ["--add-to-archive", path]
-                subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
-            except Exception as e:
-                bb.fatal("Cannot create locale archive: %s" % e.output)
-
 class Sdk(object, metaclass=ABCMeta):
     def __init__(self, d, manifest_dir):
         self.d = d
@@ -144,7 +98,10 @@
                 for lang in linguas.split():
                     pm.install("nativesdk-glibc-binary-localedata-%s.utf-8" % lang)
             # Generate a locale archive of them
-            generate_locale_archive(self.d, oe.path.join(self.sdk_host_sysroot, self.sdk_native_path))
+            target_arch = self.d.getVar('SDK_ARCH')
+            rootfs = oe.path.join(self.sdk_host_sysroot, self.sdk_native_path)
+            localedir = oe.path.join(rootfs, self.d.getVar("libdir_nativesdk"), "locale")
+            generate_locale_archive(self.d, rootfs, target_arch, localedir)
             # And now delete the binary locales
             pkgs = fnmatch.filter(pm.list_installed(), "nativesdk-glibc-binary-localedata-*.utf-8")
             pm.remove(pkgs)
@@ -209,7 +166,7 @@
 
         self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
-        self.target_pm.run_intercepts()
+        self.target_pm.run_intercepts(populate_sdk='target')
 
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
@@ -220,7 +177,7 @@
         self._populate_sysroot(self.host_pm, self.host_manifest)
         self.install_locales(self.host_pm)
 
-        self.host_pm.run_intercepts()
+        self.host_pm.run_intercepts(populate_sdk='host')
 
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
@@ -270,11 +227,17 @@
         self.host_manifest = OpkgManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_HOST)
 
+        ipk_repo_workdir = "oe-sdk-repo"
+        if "sdk_ext" in d.getVar("BB_RUNTASK"):
+            ipk_repo_workdir = "oe-sdk-ext-repo"
+
         self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
-                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
+                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"),
+                                ipk_repo_workdir=ipk_repo_workdir)
 
         self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
-                              self.d.getVar("SDK_PACKAGE_ARCHS"))
+                              self.d.getVar("SDK_PACKAGE_ARCHS"),
+                              ipk_repo_workdir=ipk_repo_workdir)
 
     def _populate_sysroot(self, pm, manifest):
         pkgs_to_install = manifest.parse_initial_manifest()
@@ -297,7 +260,7 @@
 
         self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
-        self.target_pm.run_intercepts()
+        self.target_pm.run_intercepts(populate_sdk='target')
 
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
@@ -308,7 +271,7 @@
         self._populate_sysroot(self.host_pm, self.host_manifest)
         self.install_locales(self.host_pm)
 
-        self.host_pm.run_intercepts()
+        self.host_pm.run_intercepts(populate_sdk='host')
 
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
@@ -350,15 +313,21 @@
         self.host_manifest = DpkgManifest(d, self.manifest_dir,
                                           Manifest.MANIFEST_TYPE_SDK_HOST)
 
+        deb_repo_workdir = "oe-sdk-repo"
+        if "sdk_ext" in d.getVar("BB_RUNTASK"):
+            deb_repo_workdir = "oe-sdk-ext-repo"
+
         self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
                                 self.d.getVar("PACKAGE_ARCHS"),
                                 self.d.getVar("DPKG_ARCH"),
-                                self.target_conf_dir)
+                                self.target_conf_dir,
+                                deb_repo_workdir=deb_repo_workdir)
 
         self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
                               self.d.getVar("SDK_PACKAGE_ARCHS"),
                               self.d.getVar("DEB_SDK_ARCH"),
-                              self.host_conf_dir)
+                              self.host_conf_dir,
+                              deb_repo_workdir=deb_repo_workdir)
 
     def _copy_apt_dir_to(self, dst_dir):
         staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
@@ -386,7 +355,7 @@
 
         self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
-        self.target_pm.run_intercepts()
+        self.target_pm.run_intercepts(populate_sdk='target')
 
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
@@ -399,7 +368,7 @@
         self._populate_sysroot(self.host_pm, self.host_manifest)
         self.install_locales(self.host_pm)
 
-        self.host_pm.run_intercepts()
+        self.host_pm.run_intercepts(populate_sdk='host')
 
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
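Beyond dropping the local generate_locale_archive() copy in favour of a parameterised call and passing the SDK context to run_intercepts(), the oe/sdk.py changes route the ipk and deb SDK package feeds into a separate work directory when the running task belongs to the extensible SDK. A standalone sketch of that selection, using the task names BB_RUNTASK would carry:

    # Standalone sketch of the repo-workdir selection added for both the ipk
    # and deb SDK backends above.
    def sdk_repo_workdir(runtask):
        workdir = "oe-sdk-repo"
        if "sdk_ext" in runtask:
            workdir = "oe-sdk-ext-repo"
        return workdir

    print(sdk_repo_workdir("do_populate_sdk"))      # -> oe-sdk-repo
    print(sdk_repo_workdir("do_populate_sdk_ext"))  # -> oe-sdk-ext-repo
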
diff --git a/poky/meta/lib/oe/sstatesig.py b/poky/meta/lib/oe/sstatesig.py
index b82e0f4..18c5a35 100644
--- a/poky/meta/lib/oe/sstatesig.py
+++ b/poky/meta/lib/oe/sstatesig.py
@@ -45,7 +45,7 @@
 
     # allarch packagegroups are assumed to have well behaved names which don't change between architecures/tunes
     if isPackageGroup(fn) and isAllArch(fn) and not isNative(depname):
-        return False  
+        return False
 
     # Exclude well defined machine specific configurations which don't change ABI
     if depname in siggen.abisaferecipes and not isImage(fn):
@@ -150,16 +150,23 @@
         if recipename in self.unlockedrecipes:
             unlocked = True
         else:
+            def get_mc(tid):
+                tid = tid.rsplit('.', 1)[0]
+                if tid.startswith('multiconfig:'):
+                    elems = tid.split(':')
+                    return elems[1]
             def recipename_from_dep(dep):
                 # The dep entry will look something like
                 # /path/path/recipename.bb.task, virtual:native:/p/foo.bb.task,
                 # ...
+
                 fn = dep.rsplit('.', 1)[0]
                 return dataCache.pkg_fn[fn]
 
+            mc = get_mc(fn)
             # If any unlocked recipe is in the direct dependencies then the
             # current recipe should be unlocked as well.
-            depnames = [ recipename_from_dep(x) for x in deps ]
+            depnames = [ recipename_from_dep(x) for x in deps if mc == get_mc(x)]
             if any(x in y for y in depnames for x in self.unlockedrecipes):
                 self.unlockedrecipes[recipename] = ''
                 unlocked = True
@@ -372,8 +379,14 @@
 def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
     d2 = d
     variant = ''
+    curr_variant = ''
+    if d.getVar("BBEXTENDCURR") == "multilib":
+        curr_variant = d.getVar("BBEXTENDVARIANT")
+        if "virtclass-multilib" not in d.getVar("OVERRIDES"):
+            curr_variant = "invalid"
     if taskdata2.startswith("virtual:multilib"):
         variant = taskdata2.split(":")[2]
+    if curr_variant != variant:
         if variant not in multilibcache:
             multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
         d2 = multilibcache[variant]
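
The oe/sstatesig.py change makes the locked-signature handling multiconfig-aware: only dependencies from the same multiconfig as the current recipe can mark it as unlocked. A standalone copy of the helper it introduces, exercised with invented task ids:

    # Same logic as the get_mc() helper added above; task ids are invented.
    def get_mc(tid):
        tid = tid.rsplit('.', 1)[0]
        if tid.startswith('multiconfig:'):
            elems = tid.split(':')
            return elems[1]
        # falls through and returns None for non-multiconfig task ids

    print(get_mc('multiconfig:musl:/path/to/foo.bb.do_compile'))  # -> musl
    print(get_mc('/path/to/foo.bb.do_compile'))                   # -> None
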
diff --git a/poky/meta/lib/oe/terminal.py b/poky/meta/lib/oe/terminal.py
index 94afe39..caeb5e3 100644
--- a/poky/meta/lib/oe/terminal.py
+++ b/poky/meta/lib/oe/terminal.py
@@ -112,7 +112,7 @@
             bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
                                               0.5, 10), d)
         else:
-            logger.warn(msg)
+            logger.warning(msg)
 
 class TmuxRunning(Terminal):
     """Open a new pane in the current running tmux window"""
@@ -168,7 +168,7 @@
         if d:
             bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
         else:
-            logger.warn(msg)
+            logger.warning(msg)
 
 class Custom(Terminal):
     command = 'false' # This is a placeholder
@@ -180,7 +180,7 @@
             if not '{command}' in self.command:
                 self.command += ' {command}'
             Terminal.__init__(self, sh_cmd, title, env, d)
-            logger.warn('Custom terminal was started.')
+            logger.warning('Custom terminal was started.')
         else:
             logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
             raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
diff --git a/poky/meta/lib/oe/types.py b/poky/meta/lib/oe/types.py
index 4ae58ac..f401713 100644
--- a/poky/meta/lib/oe/types.py
+++ b/poky/meta/lib/oe/types.py
@@ -103,8 +103,13 @@
     """OpenEmbedded 'boolean' type
 
     Valid values for true: 'yes', 'y', 'true', 't', '1'
-    Valid values for false: 'no', 'n', 'false', 'f', '0'
+    Valid values for false: 'no', 'n', 'false', 'f', '0', None
     """
+    if value is None:
+        return False
+
+    if isinstance(value, bool):
+        return value
 
     if not isinstance(value, str):
         raise TypeError("boolean accepts a string, not '%s'" % type(value))
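
oe.types.boolean() now short-circuits for None (treated as false) and for values that are already bool, ahead of the existing string handling. A minimal standalone sketch of the resulting coercion; the string branch is reconstructed from the docstring above and the exception text is illustrative:

    # Minimal sketch of the relaxed boolean coercion; the string branch is
    # reconstructed from the docstring above, error text is illustrative.
    def boolean(value):
        if value is None:
            return False
        if isinstance(value, bool):
            return value
        if not isinstance(value, str):
            raise TypeError("boolean accepts a string, not '%s'" % type(value))
        value = value.lower()
        if value in ('yes', 'y', 'true', 't', '1'):
            return True
        if value in ('no', 'n', 'false', 'f', '0'):
            return False
        raise ValueError("invalid boolean value '%s'" % value)

    print(boolean(None), boolean(True), boolean('no'))  # -> False True False
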
diff --git a/poky/meta/lib/oe/utils.py b/poky/meta/lib/oe/utils.py
index 80f0442..8a584d6 100644
--- a/poky/meta/lib/oe/utils.py
+++ b/poky/meta/lib/oe/utils.py
@@ -1,4 +1,6 @@
 import subprocess
+import multiprocessing
+import traceback
 
 def read_file(filename):
     try:
@@ -23,6 +25,13 @@
     else:
         return falsevalue
 
+def vartrue(var, iftrue, iffalse, d):
+    import oe.types
+    if oe.types.boolean(d.getVar(var)):
+        return iftrue
+    else:
+        return iffalse
+
 def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
     if float(d.getVar(variable)) <= float(checkvalue):
         return truevalue
@@ -246,38 +255,73 @@
             bb.note("Executing %s ..." % cmd)
             bb.build.exec_func(cmd, d)
 
-def multiprocess_exec(commands, function):
-    import signal
-    import multiprocessing
+# For each item in items, call the function 'target' with the item as the first
+# argument and extraargs as the remaining arguments, handling any exceptions in
+# the parent process.
+def multiprocess_launch(target, items, d, extraargs=None):
 
-    if not commands:
-        return []
+    class ProcessLaunch(multiprocessing.Process):
+        def __init__(self, *args, **kwargs):
+            multiprocessing.Process.__init__(self, *args, **kwargs)
+            self._pconn, self._cconn = multiprocessing.Pipe()
+            self._exception = None
+            self._result = None
 
-    def init_worker():
-        signal.signal(signal.SIGINT, signal.SIG_IGN)
+        def run(self):
+            try:
+                ret = self._target(*self._args, **self._kwargs)
+                self._cconn.send((None, ret))
+            except Exception as e:
+                tb = traceback.format_exc()
+                self._cconn.send((e, tb))
 
-    fails = []
+        def update(self):
+            if self._pconn.poll():
+                (e, tb) = self._pconn.recv()
+                if e is not None:
+                    self._exception = (e, tb)
+                else:
+                    self._result = tb
 
-    def failures(res):
-        fails.append(res)
+        @property
+        def exception(self):
+            self.update()
+            return self._exception
 
-    nproc = min(multiprocessing.cpu_count(), len(commands))
-    pool = bb.utils.multiprocessingpool(nproc, init_worker)
+        @property
+        def result(self):
+            self.update()
+            return self._result
 
-    try:
-        mapresult = pool.map_async(function, commands, error_callback=failures)
-
-        pool.close()
-        pool.join()
-        results = mapresult.get()
-    except KeyboardInterrupt:
-        pool.terminate()
-        pool.join()
-        raise
-
-    if fails:
-        raise fails[0]
-
+    max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
+    launched = []
+    errors = []
+    results = []
+    items = list(items)
+    while (items and not errors) or launched:
+        if not errors and items and len(launched) < max_process:
+            args = (items.pop(),)
+            if extraargs is not None:
+                args = args + extraargs
+            p = ProcessLaunch(target=target, args=args)
+            p.start()
+            launched.append(p)
+        for q in launched:
+            # The finished processes are joined when calling is_alive()
+            if not q.is_alive():
+                if q.exception:
+                    errors.append(q.exception)
+                if q.result:
+                    results.append(q.result)
+                launched.remove(q)
+    # Paranoia doesn't hurt
+    for p in launched:
+        p.join()
+    if errors:
+        msg = ""
+        for (e, tb) in errors:
+            msg = msg + str(e) + ": " + str(tb) + "\n"
+        bb.fatal("Fatal errors occurred in subprocesses:\n%s" % msg)
     return results
 
 def squashspaces(string):
@@ -304,11 +348,20 @@
         for pkg in sorted(pkg_dict):
             output.append(pkg)
 
-    return '\n'.join(output)
+    output_str = '\n'.join(output)
 
-def host_gcc_version(d):
+    if output_str:
+        # make sure last line is newline terminated
+        output_str += '\n'
+
+    return output_str
+
+def host_gcc_version(d, taskcontextonly=False):
     import re, subprocess
 
+    if taskcontextonly and d.getVar('BB_WORKERCONTEXT') != '1':
+        return
+
     compiler = d.getVar("BUILD_CC")
     try:
         env = os.environ.copy()
@@ -327,9 +380,18 @@
 
 def get_multilib_datastore(variant, d):
     localdata = bb.data.createCopy(d)
-    overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
-    localdata.setVar("OVERRIDES", overrides)
-    localdata.setVar("MLPREFIX", variant + "-")
+    if variant:
+        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+        localdata.setVar("OVERRIDES", overrides)
+        localdata.setVar("MLPREFIX", variant + "-")
+    else:
+        origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
+        if origdefault:
+            localdata.setVar("DEFAULTTUNE", origdefault)
+        overrides = localdata.getVar("OVERRIDES", False).split(":")
+        overrides = ":".join([x for x in overrides if not x.startswith("virtclass-multilib-")])
+        localdata.setVar("OVERRIDES", overrides)
+        localdata.setVar("MLPREFIX", "")
     return localdata
 
 #
@@ -419,3 +481,4 @@
             msg = msg + ' (%s)' % self.description
 
         return msg
+
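
The multiprocess_exec() to multiprocess_launch() rewrite above drops the worker pool in favour of explicitly managed multiprocessing.Process objects, so a child's exception and traceback travel back to the parent over a Pipe and end up in bb.fatal(). A minimal standalone demonstration of that error-propagation technique; the BB_NUMBER_THREADS scheduling and the bb.fatal() aggregation are omitted:

    # Minimal standalone demo of the Pipe-based error propagation used by
    # multiprocess_launch() above; scheduling and bb.fatal() handling omitted.
    import multiprocessing
    import traceback

    class ProcessLaunch(multiprocessing.Process):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self._pconn, self._cconn = multiprocessing.Pipe()

        def run(self):
            try:
                ret = self._target(*self._args, **self._kwargs)
                self._cconn.send((None, ret))
            except Exception as e:
                self._cconn.send((e, traceback.format_exc()))

    def work(n):
        if n == 3:
            raise ValueError("item 3 is bad")
        return n * n

    if __name__ == '__main__':
        procs = [ProcessLaunch(target=work, args=(n,)) for n in range(4)]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
            err, payload = p._pconn.recv()
            if err is None:
                print("result:", payload)
            else:
                print("failed:", err)
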