reset upstream subtrees to yocto 2.6

Reset the following subtrees on thud HEAD:

  poky: 87e3a9739d
  meta-openembedded: 6094ae18c8
  meta-security: 31dc4e7532
  meta-raspberrypi: a48743dc36
  meta-xilinx: c42016e2e6

Also re-apply backports that didn't make it into thud:
  poky:
    17726d0 systemd-systemctl-native: handle Install wildcards

  meta-openembedded:
    4321a5d libtinyxml2: update to 7.0.1
    042f0a3 libcereal: Add native and nativesdk classes
    e23284f libcereal: Allow empty package
    030e8d4 rsyslog: curl-less build with fmhttp PACKAGECONFIG
    179a1b9 gtest: update to 1.8.1

Squashed OpenBMC subtree compatibility updates:
  meta-aspeed:
    Brad Bishop (1):
          aspeed: add yocto 2.6 compatibility

  meta-ibm:
    Brad Bishop (1):
          ibm: prepare for yocto 2.6

  meta-ingrasys:
    Brad Bishop (1):
          ingrasys: set layer compatibility to yocto 2.6

  meta-openpower:
    Brad Bishop (1):
          openpower: set layer compatibility to yocto 2.6

  meta-phosphor:
    Brad Bishop (3):
          phosphor: set layer compatibility to thud
          phosphor: libgpg-error: drop patches
          phosphor: react to fitimage artifact rename

    Ed Tanous (4):
          Dropbear: upgrade options for latest upgrade
          yocto2.6: update openssl options
          busybox: remove upstream watchdog patch
          systemd: Rebase CONFIG_CGROUP_BPF patch

Change-Id: I7b1fe71cca880d0372a82d94b5fd785323e3a9e7
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/meta/classes/allarch.bbclass b/poky/meta/classes/allarch.bbclass
index 51ba509..5bd5c44 100644
--- a/poky/meta/classes/allarch.bbclass
+++ b/poky/meta/classes/allarch.bbclass
@@ -2,7 +2,18 @@
 # This class is used for architecture independent recipes/data files (usually scripts)
 #
 
-PACKAGE_ARCH = "all"
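+# With multilib enabled an allarch recipe may need a per-variant PACKAGE_ARCH,
+# so only force "all" when no MULTILIB_VARIANTS are configured (and never for
+# native, nativesdk or crosssdk recipes).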
+python allarch_package_arch_handler () {
+    if bb.data.inherits_class("native", d) or bb.data.inherits_class("nativesdk", d) \
+        or bb.data.inherits_class("crosssdk", d):
+        return
+
+    variants = d.getVar("MULTILIB_VARIANTS")
+    if not variants:
+        d.setVar("PACKAGE_ARCH", "all" )
+}
+
+addhandler allarch_package_arch_handler
+allarch_package_arch_handler[eventmask] = "bb.event.RecipePreFinalise"
 
 python () {
     # Allow this class to be included but overridden - only set
@@ -45,6 +56,7 @@
         # These multilib values shouldn't change allarch packages so exclude them
         d.appendVarFlag("emit_pkgdata", "vardepsexclude", " MULTILIB_VARIANTS")
         d.appendVarFlag("write_specfile", "vardepsexclude", " MULTILIBS")
+        d.appendVarFlag("do_package", "vardepsexclude", " package_do_shlibs")
     elif bb.data.inherits_class('packagegroup', d) and not bb.data.inherits_class('nativesdk', d):
         bb.error("Please ensure recipe %s sets PACKAGE_ARCH before inherit packagegroup" % d.getVar("FILE"))
 }
diff --git a/poky/meta/classes/archiver.bbclass b/poky/meta/classes/archiver.bbclass
index d68add9..0a9fe4f 100644
--- a/poky/meta/classes/archiver.bbclass
+++ b/poky/meta/classes/archiver.bbclass
@@ -98,9 +98,12 @@
 
         # There is a corner case with "gcc-source-${PV}" recipes, they don't have
         # the "do_configure" task, so we need to use "do_preconfigure"
-        if pn.startswith("gcc-source-"):
+        def hasTask(task):
+            return bool(d.getVarFlag(task, "task", False)) and not bool(d.getVarFlag(task, "noexec", False))
+
+        if hasTask("do_preconfigure"):
             d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
-        else:
+        elif hasTask("do_configure"):
             d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
         d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
 
@@ -243,21 +246,26 @@
         # do_configure, we archive the already configured ${S}
         # instead.
         elif pn != 'libtool-native':
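+            # Run a task in-process along with its pre/postfuncs, skipping
+            # sysroot_cleansstate and do_qa_configure, which are unwanted here.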
+            def runTask(task):
+                prefuncs = d.getVarFlag(task, 'prefuncs') or ''
+                for func in prefuncs.split():
+                    if func != "sysroot_cleansstate":
+                        bb.build.exec_func(func, d)
+                bb.build.exec_func(task, d)
+                postfuncs = d.getVarFlag(task, 'postfuncs') or ''
+                for func in postfuncs.split():
+                    if func != 'do_qa_configure':
+                        bb.build.exec_func(func, d)
+
             # Change the WORKDIR to make do_configure run in another dir.
             d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR'))
-            if bb.data.inherits_class('kernel-yocto', d):
-                bb.build.exec_func('do_kernel_configme', d)
-            if bb.data.inherits_class('cmake', d):
-                bb.build.exec_func('do_generate_toolchain_file', d)
-            prefuncs = d.getVarFlag('do_configure', 'prefuncs')
-            for func in (prefuncs or '').split():
-                if func != "sysroot_cleansstate":
-                    bb.build.exec_func(func, d)
-            bb.build.exec_func('do_configure', d)
-            postfuncs = d.getVarFlag('do_configure', 'postfuncs')
-            for func in (postfuncs or '').split():
-                if func != "do_qa_configure":
-                    bb.build.exec_func(func, d)
+
+            preceeds = bb.build.preceedtask('do_configure', False, d)
+            for task in preceeds:
+                if task != 'do_patch' and task != 'do_prepare_recipe_sysroot':
+                    runTask(task)
+            runTask('do_configure')
+
         srcdir = d.getVar('S')
         builddir = d.getVar('B')
         if srcdir != builddir:
diff --git a/poky/meta/classes/autotools.bbclass b/poky/meta/classes/autotools.bbclass
index cc857ac..8768a6a 100644
--- a/poky/meta/classes/autotools.bbclass
+++ b/poky/meta/classes/autotools.bbclass
@@ -225,7 +225,7 @@
 			find ${S} -ignore_readdir_race -name $i -delete
 		done
 
-		bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
+		bbnote Executing ACLOCAL=\"$ACLOCAL\" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths
 		ACLOCAL="$ACLOCAL" autoreconf -Wcross --verbose --install --force ${EXTRA_AUTORECONF} $acpaths || die "autoreconf execution failed."
 		cd $olddir
 	fi
@@ -237,7 +237,7 @@
 }
 
 autotools_do_compile() {
-    oe_runmake
+	oe_runmake
 }
 
 autotools_do_install() {
diff --git a/poky/meta/classes/base.bbclass b/poky/meta/classes/base.bbclass
index 2a6a6cb..bc9b236 100644
--- a/poky/meta/classes/base.bbclass
+++ b/poky/meta/classes/base.bbclass
@@ -128,6 +128,12 @@
                 os.symlink(srctool, desttool)
             else:
                 notfound.append(tool)
+    # Force "python" -> "python2"
+    desttool = os.path.join(dest, "python")
+    if not os.path.exists(desttool):
+        srctool = "python2"
+        os.symlink(srctool, desttool)
+
     if notfound and fatal:
         bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n  %s" % " ".join(notfound))
 
@@ -152,7 +158,7 @@
 addtask unpack after do_fetch
 do_unpack[dirs] = "${WORKDIR}"
 
-do_unpack[cleandirs] = "${@d.getVar('S') if d.getVar('S') != d.getVar('WORKDIR') else os.path.join('${S}', 'patches')}"
+do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"
 
 python base_do_unpack() {
     src_uri = (d.getVar('SRC_URI') or "").split()
@@ -522,19 +528,18 @@
             incompatwl = []
             for lic in bad_licenses:
                 spdx_license = return_spdx(d, lic)
-                for w in ["LGPLv2_WHITELIST_", "WHITELIST_"]:
-                    whitelist.extend((d.getVar(w + lic) or "").split())
-                    if spdx_license:
-                        whitelist.extend((d.getVar(w + spdx_license) or "").split())
-                    '''
-                    We need to track what we are whitelisting and why. If pn is
-                    incompatible we need to be able to note that the image that
-                    is created may infact contain incompatible licenses despite
-                    INCOMPATIBLE_LICENSE being set.
-                    '''
-                    incompatwl.extend((d.getVar(w + lic) or "").split())
-                    if spdx_license:
-                        incompatwl.extend((d.getVar(w + spdx_license) or "").split())
+                whitelist.extend((d.getVar("WHITELIST_" + lic) or "").split())
+                if spdx_license:
+                    whitelist.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
+                '''
+                We need to track what we are whitelisting and why. If pn is
+                incompatible we need to be able to note that the image that
+                is created may in fact contain incompatible licenses despite
+                INCOMPATIBLE_LICENSE being set.
+                '''
+                incompatwl.extend((d.getVar("WHITELIST_" + lic) or "").split())
+                if spdx_license:
+                    incompatwl.extend((d.getVar("WHITELIST_" + spdx_license) or "").split())
 
             if not pn in whitelist:
                 pkgs = d.getVar('PACKAGES').split()
@@ -624,7 +629,7 @@
         elif path.endswith('.zip') or path.endswith('.jar'):
             d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
 
-        # file is needed by rpm2cpio.sh
+        # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
         elif path.endswith('.rpm'):
             d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
 
diff --git a/poky/meta/classes/buildhistory.bbclass b/poky/meta/classes/buildhistory.bbclass
index 63980f7..40b292b 100644
--- a/poky/meta/classes/buildhistory.bbclass
+++ b/poky/meta/classes/buildhistory.bbclass
@@ -77,6 +77,7 @@
 
     import re
     import json
+    import shlex
     import errno
 
     pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE')
@@ -244,7 +245,7 @@
                 key = item[0]
                 if key.endswith('_' + pkg):
                     key = key[:-len(pkg)-1]
-                pkgdata[key] = item[1]
+                pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape')
 
         pkge = pkgdata.get('PKGE', '0')
         pkgv = pkgdata['PKGV']
@@ -287,7 +288,7 @@
         dictval = json.loads(val)
         filelist = list(dictval.keys())
         filelist.sort()
-        pkginfo.filelist = " ".join(filelist)
+        pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist])
 
         pkginfo.size = int(pkgdata['PKGSIZE'])
 
diff --git a/poky/meta/classes/clutter.bbclass b/poky/meta/classes/clutter.bbclass
index f5cd04f..8550363 100644
--- a/poky/meta/classes/clutter.bbclass
+++ b/poky/meta/classes/clutter.bbclass
@@ -1,4 +1,3 @@
-
 def get_minor_dir(v):
     import re
     m = re.match("^([0-9]+)\.([0-9]+)", v)
@@ -12,11 +11,7 @@
 VERMINOR = "${@get_minor_dir("${PV}")}"
 REALNAME = "${@get_real_name("${BPN}")}"
 
-CLUTTER_SRC_FTP = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
-
-CLUTTER_SRC_GIT = "git://gitlab.gnome.org/GNOME/${REALNAME};protocol=https"
-
-SRC_URI = "${CLUTTER_SRC_FTP}"
+SRC_URI = "${GNOME_MIRROR}/${REALNAME}/${VERMINOR}/${REALNAME}-${PV}.tar.xz;name=archive"
 S = "${WORKDIR}/${REALNAME}-${PV}"
 
 inherit autotools pkgconfig gtk-doc gettext
diff --git a/poky/meta/classes/cmake.bbclass b/poky/meta/classes/cmake.bbclass
index fcfd5dd..fd40a98 100644
--- a/poky/meta/classes/cmake.bbclass
+++ b/poky/meta/classes/cmake.bbclass
@@ -54,8 +54,6 @@
 OECMAKE_TARGET_COMPILE ?= "all"
 OECMAKE_TARGET_INSTALL ?= "install"
 
-FILES_${PN}-dev += "${libdir}/cmake ${datadir}/cmake"
-
 # CMake expects target architectures in the format of uname(2),
 # which do not always match TARGET_ARCH, so all the necessary
 # conversions should happen here.
diff --git a/poky/meta/classes/compress_doc.bbclass b/poky/meta/classes/compress_doc.bbclass
index 069d534..45bb8ff 100644
--- a/poky/meta/classes/compress_doc.bbclass
+++ b/poky/meta/classes/compress_doc.bbclass
@@ -79,6 +79,7 @@
     return hardlink_dict
 
 def _process_hardlink(hardlink_dict, compress_mode, shell_cmds, decompress=False):
+    import subprocess
     for target in hardlink_dict:
         if decompress:
             compress_format = _get_compress_format(target, shell_cmds.keys())
@@ -87,7 +88,7 @@
         else:
             cmd = "%s -f %s" % (shell_cmds[compress_mode], target)
             bb.note('compress hardlink %s' % target)
-        (retval, output) = oe.utils.getstatusoutput(cmd)
+        (retval, output) = subprocess.getstatusoutput(cmd)
         if retval:
             bb.warn("de/compress file failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
             return
@@ -176,7 +177,7 @@
                 # Normal file
                 elif os.path.isfile(file):
                     cmd = "%s %s" % (compress_cmds[compress_mode], file)
-                    (retval, output) = oe.utils.getstatusoutput(cmd)
+                    (retval, output) = subprocess.getstatusoutput(cmd)
                     if retval:
                         bb.warn("compress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
                         continue
@@ -206,7 +207,7 @@
                 # Normal file
                 elif os.path.isfile(file):
                     cmd = "%s %s" % (decompress_cmds[compress_format], file)
-                    (retval, output) = oe.utils.getstatusoutput(cmd)
+                    (retval, output) = subprocess.getstatusoutput(cmd)
                     if retval:
                         bb.warn("decompress failed %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
                         continue
diff --git a/poky/meta/classes/cpan.bbclass b/poky/meta/classes/cpan.bbclass
index 8e079e0..a5bc301 100644
--- a/poky/meta/classes/cpan.bbclass
+++ b/poky/meta/classes/cpan.bbclass
@@ -16,8 +16,7 @@
 export PERLHOSTLIB = "${STAGING_LIBDIR_NATIVE}/perl-native/perl/${@get_perl_version(d)}/"
 
 cpan_do_configure () {
-	export PERL5LIB="${PERL_ARCHLIB}"
-	yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor ${EXTRA_CPANFLAGS}
+	yes '' | perl ${EXTRA_PERLFLAGS} Makefile.PL INSTALLDIRS=vendor NO_PERLLOCAL=1 NO_PACKLIST=1 ${EXTRA_CPANFLAGS}
 
 	# Makefile.PLs can exit with success without generating a
 	# Makefile, e.g. in cases of missing configure time
@@ -41,6 +40,16 @@
 	fi
 }
 
+do_configure_append_class-target() {
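+       # Strip LD_RUN_PATH from the generated Makefiles; presumably it would
+       # otherwise embed build-time rpaths into the installed modules.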
+       find . -name Makefile | xargs sed -E -i \
+           -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
+}
+
+do_configure_append_class-nativesdk() {
+       find . -name Makefile | xargs sed -E -i \
+           -e 's:LD_RUN_PATH ?= ?"?[^"]*"?::g'
+}
+
 cpan_do_compile () {
 	oe_runmake PASTHRU_INC="${CFLAGS}" LD="${CCLD}"
 }
diff --git a/poky/meta/classes/cpan_build.bbclass b/poky/meta/classes/cpan_build.bbclass
index fac074d..9a2ad89 100644
--- a/poky/meta/classes/cpan_build.bbclass
+++ b/poky/meta/classes/cpan_build.bbclass
@@ -30,7 +30,7 @@
 }
 
 cpan_build_do_compile () {
-        perl Build verbose=1
+        perl Build --perl "${bindir}/perl" verbose=1
 }
 
 cpan_build_do_install () {
diff --git a/poky/meta/classes/cross-canadian.bbclass b/poky/meta/classes/cross-canadian.bbclass
index ee8aa61..acde331 100644
--- a/poky/meta/classes/cross-canadian.bbclass
+++ b/poky/meta/classes/cross-canadian.bbclass
@@ -30,7 +30,7 @@
     if d.getVar("MODIFYTOS") != "1":
         return
 
-    if d.getVar("TCLIBC") == "baremetal":
+    if d.getVar("TCLIBC") in [ 'baremetal', 'newlib' ]:
         return
 
     tos = d.getVar("TARGET_OS")
diff --git a/poky/meta/classes/cross.bbclass b/poky/meta/classes/cross.bbclass
index 4e85cab..34d7951 100644
--- a/poky/meta/classes/cross.bbclass
+++ b/poky/meta/classes/cross.bbclass
@@ -37,7 +37,6 @@
 CFLAGS = "${BUILD_CFLAGS}"
 CXXFLAGS = "${BUILD_CFLAGS}"
 LDFLAGS = "${BUILD_LDFLAGS}"
-LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE}"
 
 TOOLCHAIN_OPTIONS = ""
 
diff --git a/poky/meta/classes/devicetree.bbclass b/poky/meta/classes/devicetree.bbclass
new file mode 100644
index 0000000..8fe5a5e
--- /dev/null
+++ b/poky/meta/classes/devicetree.bbclass
@@ -0,0 +1,143 @@
+# This bbclass implements device tree compilation for user-provided device
+# tree sources. The compilation of the device tree sources follows the same
+# process as kernel device tree compilation, including the ability to include
+# sources from the kernel such as soc dtsi files or header files such as
+# gpio.h. In addition to device trees, this bbclass also handles compilation
+# of device tree overlays.
+#
+# The output of this class behaves similarly to that of
+# kernel-devicetree.bbclass in that the output files are installed into
+# /boot/devicetree. However, this class deliberately separates the deployed
+# device trees into the 'devicetree' subdirectory to prevent clashes with the
+# kernel-devicetree output. Additionally, the device trees are populated into
+# the sysroot so that other recipes can access them.
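+#
+# A minimal (hypothetical) recipe using this class might look like:
+#
+#   DESCRIPTION = "Device trees for myboard"
+#   inherit devicetree
+#   COMPATIBLE_MACHINE = "^myboard$"
+#   SRC_URI = "file://myboard.dts file://myoverlay.dts"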
+
+SECTION ?= "bsp"
+
+# The default inclusion of kernel device tree includes and headers means that
+# device trees built with them are at least GPLv2 (and in some cases dual
+# licensed). Default to GPLv2 if the recipe does not specify a license.
+LICENSE ?= "GPLv2"
+LIC_FILES_CHKSUM ?= "file://${COMMON_LICENSE_DIR}/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6"
+
+INHIBIT_DEFAULT_DEPS = "1"
+DEPENDS += "dtc-native"
+
+inherit deploy kernel-arch
+
+COMPATIBLE_MACHINE ?= "^$"
+
+PACKAGE_ARCH = "${MACHINE_ARCH}"
+
+SYSROOT_DIRS += "/boot/devicetree"
+FILES_${PN} = "/boot/devicetree/*.dtb /boot/devicetree/*.dtbo"
+
+S = "${WORKDIR}"
+B = "${WORKDIR}/build"
+
+# Default kernel includes; these represent what is normally used for
+# in-kernel sources.
+KERNEL_INCLUDE ??= " \
+        ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts \
+        ${STAGING_KERNEL_DIR}/arch/${ARCH}/boot/dts/* \
+        ${STAGING_KERNEL_DIR}/scripts/dtc/include-prefixes \
+        "
+
+DT_INCLUDE[doc] = "Search paths to be made available to both the device tree compiler and preprocessor for inclusion."
+DT_INCLUDE ?= "${DT_FILES_PATH} ${KERNEL_INCLUDE}"
+DT_FILES_PATH[doc] = "Defaults to the source directory; can be used to select dts files that are not in the source tree (e.g. generated)."
+DT_FILES_PATH ?= "${S}"
+
+DT_PADDING_SIZE[doc] = "Size of padding on the device tree blob; typically used as extra space for additional properties during boot."
+DT_PADDING_SIZE ??= "0x3000"
+DT_RESERVED_MAP[doc] = "Number of reserved map entries."
+DT_RESERVED_MAP ??= "8"
+DT_BOOT_CPU[doc] = "The boot CPU; defaults to 0."
+DT_BOOT_CPU ??= "0"
+
+DTC_FLAGS ?= "-R ${DT_RESERVED_MAP} -b ${DT_BOOT_CPU}"
+DTC_PPFLAGS ?= "-nostdinc -undef -D__DTS__ -x assembler-with-cpp"
+DTC_BFLAGS ?= "-p ${DT_PADDING_SIZE}"
+DTC_OFLAGS ?= "-p 0 -@ -H epapr"
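+
+# Recipes can tune these defaults, e.g. (hypothetical values):
+#   DT_PADDING_SIZE = "0x1000"
+#   DTC_FLAGS_append = " -@"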
+
+python () {
+    if d.getVar("KERNEL_INCLUDE"):
+        # auto add dependency on kernel tree, but only if kernel include paths
+        # are specified.
+        d.appendVarFlag("do_compile", "depends", " virtual/kernel:do_configure")
+}
+
+def expand_includes(varname, d):
+    import glob
+    includes = set()
+    # expand all includes with glob
+    for i in (d.getVar(varname) or "").split():
+        for g in glob.glob(i):
+            if os.path.isdir(g): # only add directories to include path
+                includes.add(g)
+    return includes
+
+def devicetree_source_is_overlay(path):
+    # determine if a dts file is an overlay by checking if it uses "/plugin/;"
+    with open(path, "r") as f:
+        for i in f:
+            if i.startswith("/plugin/;"):
+                return True
+    return False
+
+def devicetree_compile(dtspath, includes, d):
+    import subprocess
+    dts = os.path.basename(dtspath)
+    dtname = os.path.splitext(dts)[0]
+    bb.note("Processing {0} [{1}]".format(dtname, dts))
+
+    # preprocess
+    ppargs = d.getVar("BUILD_CPP").split()
+    ppargs += (d.getVar("DTC_PPFLAGS") or "").split()
+    for i in includes:
+        ppargs.append("-I{0}".format(i))
+    ppargs += ["-o", "{0}.pp".format(dts), dtspath]
+    bb.note("Running {0}".format(" ".join(ppargs)))
+    subprocess.run(ppargs, check = True)
+
+    # determine if the file is an overlay or not (using the preprocessed file)
+    isoverlay = devicetree_source_is_overlay("{0}.pp".format(dts))
+
+    # compile
+    dtcargs = ["dtc"] + (d.getVar("DTC_FLAGS") or "").split()
+    if isoverlay:
+        dtcargs += (d.getVar("DTC_OFLAGS") or "").split()
+    else:
+        dtcargs += (d.getVar("DTC_BFLAGS") or "").split()
+    for i in includes:
+        dtcargs += ["-i", i]
+    dtcargs += ["-o", "{0}.{1}".format(dtname, "dtbo" if isoverlay else "dtb")]
+    dtcargs += ["-I", "dts", "-O", "dtb", "{0}.pp".format(dts)]
+    bb.note("Running {0}".format(" ".join(dtcargs)))
+    subprocess.run(dtcargs, check = True)
+
+python devicetree_do_compile() {
+    includes = expand_includes("DT_INCLUDE", d)
+    listpath = d.getVar("DT_FILES_PATH")
+    for dts in os.listdir(listpath):
+        if not dts.endswith(".dts"):
+            continue # skip non-.dts files
+        dtspath = os.path.join(listpath, dts)
+        devicetree_compile(dtspath, includes, d)
+}
+
+devicetree_do_install() {
+    for DTB_FILE in `ls *.dtb *.dtbo`; do
+        install -Dm 0644 ${B}/${DTB_FILE} ${D}/boot/devicetree/${DTB_FILE}
+    done
+}
+
+devicetree_do_deploy() {
+    for DTB_FILE in `ls *.dtb *.dtbo`; do
+        install -Dm 0644 ${B}/${DTB_FILE} ${DEPLOYDIR}/devicetree/${DTB_FILE}
+    done
+}
+addtask deploy before do_build after do_install
+
+EXPORT_FUNCTIONS do_compile do_install do_deploy
+
diff --git a/poky/meta/classes/devtool-source.bbclass b/poky/meta/classes/devtool-source.bbclass
index 56882a4..1372e32 100644
--- a/poky/meta/classes/devtool-source.bbclass
+++ b/poky/meta/classes/devtool-source.bbclass
@@ -90,11 +90,23 @@
                         fname in files])
         return ret
 
+    is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
     # Move local source files into separate subdir
     recipe_patches = [os.path.basename(patch) for patch in
                         oe.recipeutils.get_recipe_patches(d)]
     local_files = oe.recipeutils.get_recipe_local_files(d)
 
+    if is_kernel_yocto:
+        for key in local_files.copy():
+            if key.endswith('scc'):
+                sccfile = open(local_files[key], 'r')
+                for l in sccfile:
+                    line = l.split()
+                    if line and line[0] in ('kconf', 'patch'):
+                        local_files[line[-1]] = os.path.join(os.path.dirname(local_files[key]), line[-1])
+                        shutil.copy2(os.path.join(os.path.dirname(local_files[key]), line[-1]), workdir)
+                sccfile.close()
+
     # Ignore local files with subdir={BP}
     srcabspath = os.path.abspath(srcsubdir)
     local_files = [fname for fname in local_files if
@@ -171,14 +183,14 @@
 
     extra_overrides = d.getVar('DEVTOOL_EXTRA_OVERRIDES')
     if extra_overrides:
-        extra_override_list = extra_overrides.split(':')
+        extra_overrides = set(extra_overrides.split(':'))
         devbranch = d.getVar('DEVTOOL_DEVBRANCH')
         default_overrides = d.getVar('OVERRIDES').split(':')
         no_overrides = []
         # First, we may have some overrides that are referred to in the recipe set in
         # our configuration, so we need to make a branch that excludes those
         for override in default_overrides:
-            if override not in extra_override_list:
+            if override not in extra_overrides:
                 no_overrides.append(override)
         if default_overrides != no_overrides:
             # Some overrides are active in the current configuration, so
@@ -196,7 +208,7 @@
         else:
             bb.process.run('git checkout %s -b devtool-no-overrides' % devbranch, cwd=srcsubdir)
 
-        for override in extra_override_list:
+        for override in extra_overrides:
             localdata = bb.data.createCopy(d)
             if override in default_overrides:
                 bb.process.run('git branch devtool-override-%s %s' % (override, devbranch), cwd=srcsubdir)
diff --git a/poky/meta/classes/distutils-common-base.bbclass b/poky/meta/classes/distutils-common-base.bbclass
index 824a1b6..94b5fd4 100644
--- a/poky/meta/classes/distutils-common-base.bbclass
+++ b/poky/meta/classes/distutils-common-base.bbclass
@@ -1,6 +1,18 @@
 export STAGING_INCDIR
 export STAGING_LIBDIR
 
+# LDSHARED is the ld *command* used to create shared library
+export LDSHARED  = "${CCLD} -shared"
+# LDXXSHARED is the ld *command* used to create shared library of C++
+# objects
+export LDCXXSHARED  = "${CXX} -shared"
+# CCSHARED are the C *flags* used to create objects to go into a shared
+# library (module)
+export CCSHARED  = "-fPIC -DPIC"
+# LINKFORSHARED are the flags passed to the $(CC) command that links
+# the python executable
+export LINKFORSHARED = "${SECURITY_CFLAGS} -Xlinker -export-dynamic"
+
 FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
 
 FILES_${PN}-staticdev += "\
diff --git a/poky/meta/classes/distutils.bbclass b/poky/meta/classes/distutils.bbclass
index 1930c35..9862731 100644
--- a/poky/meta/classes/distutils.bbclass
+++ b/poky/meta/classes/distutils.bbclass
@@ -4,20 +4,30 @@
 DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
 DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
     --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
-    --install-data=${D}/${datadir}"
+DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
+    --prefix=${prefix} \
+    --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+    --install-data=${datadir}"
+
+distutils_do_configure() {
+        if [ "${CLEANBROKEN}" != "1" ] ; then
+                NO_FETCH_BUILD=1 \
+                ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
+        fi
+}
 
 distutils_do_compile() {
+         NO_FETCH_BUILD=1 \
          STAGING_INCDIR=${STAGING_INCDIR} \
          STAGING_LIBDIR=${STAGING_LIBDIR} \
          ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
-         bbfatal_log "${PYTHON_PN} setup.py build execution failed."
+         bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
 }
 
 distutils_stage_headers() {
         install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+        bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
 }
 
 distutils_stage_all() {
@@ -26,7 +36,7 @@
         install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
         PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+        bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
 }
 
 distutils_do_install() {
@@ -34,8 +44,8 @@
         STAGING_INCDIR=${STAGING_INCDIR} \
         STAGING_LIBDIR=${STAGING_LIBDIR} \
         PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
-        ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+        ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
+        bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
 
         # support filenames with *spaces*
         # only modify file if it contains path  and recompile it
@@ -77,6 +87,6 @@
 	fi
 }
 
-EXPORT_FUNCTIONS do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
 
 export LDSHARED="${CCLD} -shared"
diff --git a/poky/meta/classes/distutils3.bbclass b/poky/meta/classes/distutils3.bbclass
index 6c30306..834e322 100644
--- a/poky/meta/classes/distutils3.bbclass
+++ b/poky/meta/classes/distutils3.bbclass
@@ -5,22 +5,32 @@
 DISTUTILS_STAGE_HEADERS_ARGS ?= "--install-dir=${STAGING_INCDIR}/${PYTHON_DIR}"
 DISTUTILS_STAGE_ALL_ARGS ?= "--prefix=${STAGING_DIR_HOST}${prefix} \
     --install-data=${STAGING_DATADIR}"
-DISTUTILS_INSTALL_ARGS ?= "--prefix=${D}/${prefix} \
-    --install-data=${D}/${datadir}"
+DISTUTILS_INSTALL_ARGS ?= "--root=${D} \
+    --prefix=${prefix} \
+    --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+    --install-data=${datadir}"
+
+distutils3_do_configure() {
+	if [ "${CLEANBROKEN}" != "1" ] ; then
+		NO_FETCH_BUILD=1 \
+		${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py clean ${DISTUTILS_BUILD_ARGS}
+	fi
+}
 
 distutils3_do_compile() {
+        NO_FETCH_BUILD=1 \
         STAGING_INCDIR=${STAGING_INCDIR} \
         STAGING_LIBDIR=${STAGING_LIBDIR} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
         build ${DISTUTILS_BUILD_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
+        bbfatal_log "'${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS}' execution failed."
 }
 distutils3_do_compile[vardepsexclude] = "MACHINE"
 
 distutils3_stage_headers() {
         install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
+        bbfatal_log "'${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS}' execution for stage_headers failed."
 }
 distutils3_stage_headers[vardepsexclude] = "MACHINE"
 
@@ -30,7 +40,7 @@
         install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
         PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
+        bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS}' execution for stage_all failed."
 }
 distutils3_stage_all[vardepsexclude] = "MACHINE"
 
@@ -39,8 +49,8 @@
         STAGING_INCDIR=${STAGING_INCDIR} \
         STAGING_LIBDIR=${STAGING_LIBDIR} \
         PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
-        ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
-        bbfatal_log "${PYTHON_PN} setup.py install execution failed."
+        ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS} || \
+        bbfatal_log "'${PYTHON_PN} setup.py install ${DISTUTILS_INSTALL_ARGS}' execution failed."
 
         # support filenames with *spaces*
         find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
@@ -71,6 +81,6 @@
 }
 distutils3_do_install[vardepsexclude] = "MACHINE"
 
-EXPORT_FUNCTIONS do_compile do_install
+EXPORT_FUNCTIONS do_configure do_compile do_install
 
 export LDSHARED="${CCLD} -shared"
diff --git a/poky/meta/classes/dos2unix.bbclass b/poky/meta/classes/dos2unix.bbclass
new file mode 100644
index 0000000..3fc17e2
--- /dev/null
+++ b/poky/meta/classes/dos2unix.bbclass
@@ -0,0 +1,14 @@
+# Class to convert all CRLF line terminators to LF.
+# Some projects are developed/maintained on Windows and
+# therefore use CRLF line terminators rather than the LF
+# used on Linux, which can cause annoying patching errors
+# during git push/checkout processes.
+
+do_convert_crlf_to_lf[depends] += "dos2unix-native:do_populate_sysroot"
+
+# Convert CRLF line terminators to LF
+do_convert_crlf_to_lf () {
+	find ${S} -type f -exec dos2unix {} \;
+}
+
+addtask convert_crlf_to_lf after do_unpack before do_patch
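+
+# Usage is a single line in a recipe whose sources use CRLF line endings:
+#   inherit dos2unix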
diff --git a/poky/meta/classes/externalsrc.bbclass b/poky/meta/classes/externalsrc.bbclass
index 78a08c8..3618b99 100644
--- a/poky/meta/classes/externalsrc.bbclass
+++ b/poky/meta/classes/externalsrc.bbclass
@@ -77,6 +77,9 @@
             # Dummy value because the default function can't be called with blank SRC_URI
             d.setVar('SRCPV', '999')
 
+        if d.getVar('CONFIGUREOPT_DEPTRACK') == '--disable-dependency-tracking':
+            d.setVar('CONFIGUREOPT_DEPTRACK', '')
+
         tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
 
         for task in tasks:
diff --git a/poky/meta/classes/fontcache.bbclass b/poky/meta/classes/fontcache.bbclass
index e763311..f71a754 100644
--- a/poky/meta/classes/fontcache.bbclass
+++ b/poky/meta/classes/fontcache.bbclass
@@ -17,7 +17,7 @@
 FONTCONFIG_CACHE_ENV ?= "FC_DEBUG=1"
 fontcache_common() {
 if [ -n "$D" ] ; then
-	$INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} \
+	$INTERCEPT_DIR/postinst_intercept update_font_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} \
 		'bindir="${bindir}"' \
 		'libdir="${libdir}"' \
 		'base_libdir="${base_libdir}"' \
diff --git a/poky/meta/classes/gio-module-cache.bbclass b/poky/meta/classes/gio-module-cache.bbclass
index 0520c22..e429bd3 100644
--- a/poky/meta/classes/gio-module-cache.bbclass
+++ b/poky/meta/classes/gio-module-cache.bbclass
@@ -2,7 +2,6 @@
 inherit qemu
 
 GIO_MODULE_PACKAGES ??= "${PN}"
-GIO_MODULE_PACKAGES_class-nativesdk = ""
 
 gio_module_cache_common() {
 if [ "x$D" != "x" ]; then
diff --git a/poky/meta/classes/go.bbclass b/poky/meta/classes/go.bbclass
index d33d83e..af331f8 100644
--- a/poky/meta/classes/go.bbclass
+++ b/poky/meta/classes/go.bbclass
@@ -8,7 +8,7 @@
 export GOROOT
 export GOROOT_FINAL = "${libdir}/go"
 
-DEPENDS_GOLANG_class-target = "virtual/${TARGET_PREFIX}go virtual/${TARGET_PREFIX}go-runtime"
+DEPENDS_GOLANG_class-target = "virtual/${TUNE_PKGARCH}-go virtual/${TARGET_PREFIX}go-runtime"
 DEPENDS_GOLANG_class-native = "go-native"
 DEPENDS_GOLANG_class-nativesdk = "virtual/${TARGET_PREFIX}go-crosssdk virtual/${TARGET_PREFIX}go-runtime"
 
@@ -179,3 +179,13 @@
 
 INSANE_SKIP_${PN} += "ldflags"
 INSANE_SKIP_${PN}-ptest += "ldflags"
+
+# Add -buildmode=pie to GOBUILDFLAGS to satisfy "textrel" QA checking, but mips
+# doesn't support -buildmode=pie, so skip the QA checking for mips and its
+# variants.
+python() {
+    if 'mips' in d.getVar('TARGET_ARCH'):
+        d.appendVar('INSANE_SKIP_%s' % d.getVar('PN'), " textrel")
+    else:
+        d.appendVar('GOBUILDFLAGS', ' -buildmode=pie')
+}
diff --git a/poky/meta/classes/goarch.bbclass b/poky/meta/classes/goarch.bbclass
index f54c516..b2c94fa 100644
--- a/poky/meta/classes/goarch.bbclass
+++ b/poky/meta/classes/goarch.bbclass
@@ -33,9 +33,13 @@
 COMPATIBLE_HOST_powerpc = "null"
 COMPATIBLE_HOST_powerpc64 = "null"
 COMPATIBLE_HOST_mipsarchn32 = "null"
-ARM_INSTRUCTION_SET = "arm"
+
+ARM_INSTRUCTION_SET_armv4 = "arm"
+ARM_INSTRUCTION_SET_armv5 = "arm"
+ARM_INSTRUCTION_SET_armv6 = "arm"
+
 TUNE_CCARGS_remove = "-march=mips32r2"
-SECURITY_CFLAGS_mips = "${SECURITY_NOPIE_CFLAGS}"
+SECURITY_CFLAGS_mipsarch = "${SECURITY_NOPIE_CFLAGS}"
 SECURITY_NOPIE_CFLAGS ??= ""
 
 def go_map_arch(a, d):
diff --git a/poky/meta/classes/gobject-introspection.bbclass b/poky/meta/classes/gobject-introspection.bbclass
index b6160b8..a323c1f 100644
--- a/poky/meta/classes/gobject-introspection.bbclass
+++ b/poky/meta/classes/gobject-introspection.bbclass
@@ -40,4 +40,4 @@
     
 # .gir files go to dev package, as they're needed for developing (but not for
 # running) things that depends on introspection.
-FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir"
+FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir ${libdir}/gir-*/*.gir"
diff --git a/poky/meta/classes/gtk-immodules-cache.bbclass b/poky/meta/classes/gtk-immodules-cache.bbclass
index 3d82dbe..9bb0af8 100644
--- a/poky/meta/classes/gtk-immodules-cache.bbclass
+++ b/poky/meta/classes/gtk-immodules-cache.bbclass
@@ -10,53 +10,39 @@
 
 gtk_immodule_cache_postinst() {
 if [ "x$D" != "x" ]; then
-        if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
-            IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
-            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
-                $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
-                sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
-        fi
-        if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
-            IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
-            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
-                $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
-                sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
-        fi
-
-    [ $? -ne 0 ] && exit 1
-    exit 0
-fi
-if [ ! -z `which gtk-query-immodules-2.0` ]; then
-    gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
-fi
-if [ ! -z `which gtk-query-immodules-3.0` ]; then
-    gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+    $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
+            mlprefix=${MLPREFIX} \
+            binprefix=${MLPREFIX} \
+            libdir=${libdir} \
+            libexecdir=${libexecdir} \
+            base_libdir=${base_libdir} \
+            bindir=${bindir}
+else
+    if [ ! -z `which gtk-query-immodules-2.0` ]; then
+        gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
+    fi
+    if [ ! -z `which gtk-query-immodules-3.0` ]; then
+        gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+    fi
 fi
 }
 
 gtk_immodule_cache_postrm() {
 if [ "x$D" != "x" ]; then
-        if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
-            IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
-            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
-                $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
-                sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
-        fi
-        if [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
-            IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
-            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
-                $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
-                sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
-        fi
-
-    [ $? -ne 0 ] && exit 1
-    exit 0
-fi
-if [ ! -z `which gtk-query-immodules-2.0` ]; then
-    gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
-fi
-if [ ! -z `which gtk-query-immodules-3.0` ]; then
-    gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+    $INTERCEPT_DIR/postinst_intercept update_gtk_immodules_cache ${PKG} \
+            mlprefix=${MLPREFIX} \
+            binprefix=${MLPREFIX} \
+            libdir=${libdir} \
+            libexecdir=${libexecdir} \
+            base_libdir=${base_libdir} \
+            bindir=${bindir}
+else
+    if [ ! -z `which gtk-query-immodules-2.0` ]; then
+        gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
+    fi
+    if [ ! -z `which gtk-query-immodules-3.0` ]; then
+        gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
+    fi
 fi
 }
 
diff --git a/poky/meta/classes/icecc.bbclass b/poky/meta/classes/icecc.bbclass
index 0ca8de8..b5a8457 100644
--- a/poky/meta/classes/icecc.bbclass
+++ b/poky/meta/classes/icecc.bbclass
@@ -171,7 +171,7 @@
     return "yes"
 
 def icecc_is_allarch(bb, d):
-    return d.getVar("PACKAGE_ARCH") == "all" or bb.data.inherits_class('allarch', d)
+    return d.getVar("PACKAGE_ARCH") == "all"
 
 def icecc_is_kernel(bb, d):
     return \
diff --git a/poky/meta/classes/image-live.bbclass b/poky/meta/classes/image-live.bbclass
index 966277c..af71be5 100644
--- a/poky/meta/classes/image-live.bbclass
+++ b/poky/meta/classes/image-live.bbclass
@@ -19,8 +19,6 @@
 
 # External variables (also used by syslinux.bbclass)
 # ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${NOISO}  - skip building the ISO image if set to 1
-# ${NOHDD}  - skip building the HDD image if set to 1
 # ${HDDIMG_ID} - FAT image volume-id
 # ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
 
@@ -80,8 +78,8 @@
 }
 
 build_iso() {
-	# Only create an ISO if we have an INITRD and NOISO was not set
-	if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
+	# Only create an ISO if we have an INITRD and the live or iso image type was selected
+	if [ -z "${INITRD}" ] || [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso', '1', '0', d)}" != "1" ]; then
 		bbnote "ISO image will not be created."
 		return
 	fi
@@ -217,7 +215,7 @@
 
 build_hddimg() {
 	# Create an HDD image
-	if [ "${NOHDD}" != "1" ] ; then
+	if [ "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live hddimg', '1', '0', d)}" = "1" ] ; then
 		populate_live ${HDDDIR}
 
 		if [ "${PCBIOS}" = "1" ]; then
@@ -232,11 +230,11 @@
 		if [ -f ${HDDDIR}/rootfs.img ]; then
 			rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
 			max_size=`expr 4 \* 1024 \* 1024 \* 1024`
-			if [ $rootfs_img_size -gt $max_size ]; then
-				bberror "${HDDDIR}/rootfs.img execeeds 4GB,"
-				bberror "this doesn't work on FAT filesystem, you can try either of:"
-				bberror "1) Reduce the size of rootfs.img"
-				bbfatal "2) Use iso, vmdk or vdi to instead of hddimg\n"
+			if [ $rootfs_img_size -ge $max_size ]; then
+				bberror "the size of ${HDDDIR}/rootfs.img is greater than or equal to 4GB,"
+				bberror "and this doesn't work on a FAT filesystem. You can either:"
+				bberror "1) Reduce the size of rootfs.img, or"
+				bbfatal "2) Use wic, vmdk or vdi instead of hddimg\n"
 			fi
 		fi
 
diff --git a/poky/meta/classes/image-mklibs.bbclass b/poky/meta/classes/image-mklibs.bbclass
index 5f6df1b..68e11d4 100644
--- a/poky/meta/classes/image-mklibs.bbclass
+++ b/poky/meta/classes/image-mklibs.bbclass
@@ -19,7 +19,7 @@
 		echo $i
 	done > ${WORKDIR}/mklibs/executables.list
 
-	dynamic_loader=$(linuxloader)
+	dynamic_loader=${@get_linuxloader(d)}
 
 	mklibs -v \
 		--ldlib ${dynamic_loader} \
diff --git a/poky/meta/classes/image-postinst-intercepts.bbclass b/poky/meta/classes/image-postinst-intercepts.bbclass
new file mode 100644
index 0000000..ed30bbd
--- /dev/null
+++ b/poky/meta/classes/image-postinst-intercepts.bbclass
@@ -0,0 +1,23 @@
+# Gather existing and candidate postinst intercepts from BBPATH
+POSTINST_INTERCEPTS_DIR ?= "${COREBASE}/scripts/postinst-intercepts"
+POSTINST_INTERCEPTS_PATHS ?= "${@':'.join('%s/postinst-intercepts' % p for p in '${BBPATH}'.split(':'))}:${POSTINST_INTERCEPTS_DIR}"
+
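+# Record each intercept script plus candidate checksums; image.bbclass feeds
+# POSTINST_INTERCEPT_CHECKSUMS into do_rootfs[file-checksums] (see below) so
+# the rootfs is rebuilt whenever an intercept script changes.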
+python find_intercepts() {
+    intercepts = {}
+    search_paths = []
+    paths = d.getVar('POSTINST_INTERCEPTS_PATHS').split(':')
+    overrides = (':' + d.getVar('FILESOVERRIDES')).split(':') + ['']
+    search_paths = [os.path.join(p, op) for p in paths for op in overrides]
+    searched = oe.path.which_wild('*', ':'.join(search_paths), candidates=True)
+    files, chksums = [], []
+    for pathname, candidates in searched:
+        if os.path.isfile(pathname):
+            files.append(pathname)
+            chksums.append('%s:True' % pathname)
+            chksums.extend('%s:False' % c for c in candidates[:-1])
+
+    d.setVar('POSTINST_INTERCEPT_CHECKSUMS', ' '.join(chksums))
+    d.setVar('POSTINST_INTERCEPTS', ' '.join(files))
+}
+find_intercepts[eventmask] += "bb.event.RecipePreFinalise"
+addhandler find_intercepts
diff --git a/poky/meta/classes/image-prelink.bbclass b/poky/meta/classes/image-prelink.bbclass
index 6a8afa8..04dd57c 100644
--- a/poky/meta/classes/image-prelink.bbclass
+++ b/poky/meta/classes/image-prelink.bbclass
@@ -33,7 +33,7 @@
 	fi
 	cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
 
-	dynamic_loader=$(linuxloader)
+	dynamic_loader=${@get_linuxloader(d)}
 
 	# prelink!
 	if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
diff --git a/poky/meta/classes/image.bbclass b/poky/meta/classes/image.bbclass
index 2247b30..452b1ad 100644
--- a/poky/meta/classes/image.bbclass
+++ b/poky/meta/classes/image.bbclass
@@ -1,9 +1,21 @@
-inherit rootfs_${IMAGE_PKGTYPE}
 
+IMAGE_CLASSES ??= ""
+
+# rootfs bootstrap install
+# warning -  image-container resets this
+ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
+
+# Handle inherits of any of the image classes we need
+IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
 # Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk_base
 # in the non-Linux SDK_OS case, such as mingw32
-SDKEXTCLASS ?= "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
-inherit ${SDKEXTCLASS}
+IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
+IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
+IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
+IMGCLASSES += "image_types_wic"
+IMGCLASSES += "rootfs-postcommands"
+IMGCLASSES += "image-postinst-intercepts"
+inherit ${IMGCLASSES}
 
 TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
 TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
@@ -11,25 +23,23 @@
 
 LICENSE ?= "MIT"
 PACKAGES = ""
-DEPENDS += "${MLPREFIX}qemuwrapper-cross depmodwrapper-cross"
+DEPENDS += "${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross depmodwrapper-cross cross-localedef-native"
 RDEPENDS += "${PACKAGE_INSTALL} ${LINGUAS_INSTALL}"
 RRECOMMENDS += "${PACKAGE_INSTALL_ATTEMPTONLY}"
+PATH_prepend = "${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
 
 INHIBIT_DEFAULT_DEPS = "1"
 
-TESTIMAGECLASS = "${@oe.utils.conditional('TEST_IMAGE', '1', 'testimage-auto', '', d)}"
-inherit ${TESTIMAGECLASS}
-
 # IMAGE_FEATURES may contain any available package group
 IMAGE_FEATURES ?= ""
 IMAGE_FEATURES[type] = "list"
-IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password post-install-logging"
+IMAGE_FEATURES[validitems] += "debug-tweaks read-only-rootfs empty-root-password allow-empty-password allow-root-login post-install-logging"
 
 # Generate companion debugfs?
 IMAGE_GEN_DEBUGFS ?= "0"
 
-# rootfs bootstrap install
-ROOTFS_BOOTSTRAP_INSTALL = "run-postinsts"
+# These packages will be installed additionally into the debug rootfs
+IMAGE_INSTALL_DEBUGFS ?= ""
 
 # These packages will be removed from a read-only rootfs after all other
 # packages have been installed
@@ -117,7 +127,7 @@
                  'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
                  'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
                  'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
-                 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS']
+                 'CONVERSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED', 'IMGDEPLOYDIR', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'REPRODUCIBLE_TIMESTAMP_ROOTFS', 'IMAGE_INSTALL_DEBUGFS']
     variables.extend(rootfs_command_variables(d))
     variables.extend(variable_depends(d))
     return " ".join(variables)
@@ -126,23 +136,6 @@
 
 do_build[depends] += "virtual/kernel:do_deploy"
 
-def build_live(d):
-    if bb.utils.contains("IMAGE_FSTYPES", "live", "live", "0", d) == "0": # live is not set but hob might set iso or hddimg
-        d.setVar('NOISO', bb.utils.contains('IMAGE_FSTYPES', "iso", "0", "1", d))
-        d.setVar('NOHDD', bb.utils.contains('IMAGE_FSTYPES', "hddimg", "0", "1", d))
-        if d.getVar('NOISO') == "0" or d.getVar('NOHDD') == "0":
-            return "image-live"
-        return ""
-    return "image-live"
-
-IMAGE_TYPE_live = "${@build_live(d)}"
-inherit ${IMAGE_TYPE_live}
-
-IMAGE_TYPE_container = '${@bb.utils.contains("IMAGE_FSTYPES", "container", "image-container", "", d)}'
-inherit ${IMAGE_TYPE_container}
-
-IMAGE_TYPE_wic = "image_types_wic"
-inherit ${IMAGE_TYPE_wic}
 
 python () {
     def extraimage_getdepends(task):
@@ -151,7 +144,6 @@
             deps += " %s:%s" % (dep, task)
         return deps
 
-    d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_lic'))
     d.appendVarFlag('do_image_complete', 'depends', extraimage_getdepends('do_populate_sysroot'))
 
     deps = " " + imagetypes_getdepends(d)
@@ -177,9 +169,6 @@
     check_image_features(d)
 }
 
-IMAGE_CLASSES += "image_types"
-inherit ${IMAGE_CLASSES}
-
 IMAGE_POSTPROCESS_COMMAND ?= ""
 
 # some default locales
@@ -191,8 +180,6 @@
 # aren't yet available.
 PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
 
-inherit rootfs-postcommands
-
 PACKAGE_EXCLUDE ??= ""
 PACKAGE_EXCLUDE[type] = "list"
 
@@ -262,6 +249,7 @@
 do_rootfs[dirs] = "${TOPDIR}"
 do_rootfs[cleandirs] += "${S} ${IMGDEPLOYDIR}"
 do_rootfs[umask] = "022"
+do_rootfs[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
 addtask rootfs after do_prepare_recipe_sysroot
 
 fakeroot python do_image () {
@@ -666,7 +654,10 @@
 reproducible_final_image_task () {
     if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
         if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then
-            REPRODUCIBLE_TIMESTAMP_ROOTFS=`git log -1 --pretty=%ct`
+            REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true
+            if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then
+                REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}`
+            fi
         fi
         # Set mtime of all files to a reproducible value
         bbnote "reproducible_final_image_task: mtime set to $REPRODUCIBLE_TIMESTAMP_ROOTFS"
diff --git a/poky/meta/classes/image_types.bbclass b/poky/meta/classes/image_types.bbclass
index 00a00d3..05e5b0a 100644
--- a/poky/meta/classes/image_types.bbclass
+++ b/poky/meta/classes/image_types.bbclass
@@ -154,7 +154,7 @@
 	local ubinize_args="$2"
     
         # Added prompt error message for ubi and ubifs image creation.
-        if [ -z "$mkubifs_args"] || [ -z "$ubinize_args" ]; then
+        if [ -z "$mkubifs_args" ] || [ -z "$ubinize_args" ]; then
             bbfatal "MKUBIFS_ARGS and UBINIZE_ARGS have to be set, see http://www.linux-mtd.infradead.org/faq/ubifs.html for details"
         fi
     
diff --git a/poky/meta/classes/insane.bbclass b/poky/meta/classes/insane.bbclass
index eb2d967..4644221 100644
--- a/poky/meta/classes/insane.bbclass
+++ b/poky/meta/classes/insane.bbclass
@@ -46,139 +46,6 @@
 
 UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
 
-#
-# dictionary for elf headers
-#
-# feel free to add and correct.
-#
-#           TARGET_OS  TARGET_ARCH   MACHINE, OSABI, ABIVERSION, Little Endian, 32bit?
-def package_qa_get_machine_dict(d):
-    machdata = {
-            "darwin9" : { 
-                        "arm" :       (40,     0,    0,          True,          32),
-                      },
-            "eabi" : {
-                        "arm" :       (40,     0,    0,          True,          32),
-                      },
-            "elf" : {
-                        "aarch64" :   (183,    0,    0,          True,          64),
-                        "aarch64_be" :(183,    0,    0,          False,         64),
-                        "i586" :      (3,      0,    0,          True,          32),
-                        "x86_64":     (62,     0,    0,          True,          64),
-                        "epiphany":   (4643,   0,    0,          True,          32),
-                        "mips":       ( 8,     0,    0,          False,         32),
-                        "mipsel":     ( 8,     0,    0,          True,          32),
-                        "microblaze":  (189,   0,    0,          False,         32),
-                        "microblazeeb":(189,   0,    0,          False,         32),
-                        "microblazeel":(189,   0,    0,          True,          32),
-                        "riscv32":    (243,    0,    0,          True,          32),
-                        "riscv64":    (243,    0,    0,          True,          64),
-                      },
-            "linux" : { 
-                        "aarch64" :   (183,    0,    0,          True,          64),
-                        "aarch64_be" :(183,    0,    0,          False,         64),
-                        "arm" :       (40,    97,    0,          True,          32),
-                        "armeb":      (40,    97,    0,          False,         32),
-                        "powerpc":    (20,     0,    0,          False,         32),
-                        "powerpc64":  (21,     0,    0,          False,         64),
-                        "i386":       ( 3,     0,    0,          True,          32),
-                        "i486":       ( 3,     0,    0,          True,          32),
-                        "i586":       ( 3,     0,    0,          True,          32),
-                        "i686":       ( 3,     0,    0,          True,          32),
-                        "x86_64":     (62,     0,    0,          True,          64),
-                        "ia64":       (50,     0,    0,          True,          64),
-                        "alpha":      (36902,  0,    0,          True,          64),
-                        "hppa":       (15,     3,    0,          False,         32),
-                        "m68k":       ( 4,     0,    0,          False,         32),
-                        "mips":       ( 8,     0,    0,          False,         32),
-                        "mipsel":     ( 8,     0,    0,          True,          32),
-                        "mips64":     ( 8,     0,    0,          False,         64),
-                        "mips64el":   ( 8,     0,    0,          True,          64),
-                        "mipsisa32r6":   ( 8,  0,    0,          False,         32),
-                        "mipsisa32r6el": ( 8,  0,    0,          True,          32),
-                        "mipsisa64r6":   ( 8,  0,    0,          False,         64),
-                        "mipsisa64r6el": ( 8,  0,    0,          True,          64),
-                        "nios2":      (113,    0,    0,          True,          32),
-                        "riscv32":    (243,    0,    0,          True,          32),
-                        "riscv64":    (243,    0,    0,          True,          64),
-                        "s390":       (22,     0,    0,          False,         32),
-                        "sh4":        (42,     0,    0,          True,          32),
-                        "sparc":      ( 2,     0,    0,          False,         32),
-                        "microblaze":  (189,   0,    0,          False,         32),
-                        "microblazeeb":(189,   0,    0,          False,         32),
-                        "microblazeel":(189,   0,    0,          True,          32),
-                      },
-            "linux-musl" : { 
-                        "aarch64" :   (183,    0,    0,            True,          64),
-                        "aarch64_be" :(183,    0,    0,            False,         64),
-                        "arm" :       (  40,    97,    0,          True,          32),
-                        "armeb":      (  40,    97,    0,          False,         32),
-                        "powerpc":    (  20,     0,    0,          False,         32),
-                        "i386":       (   3,     0,    0,          True,          32),
-                        "i486":       (   3,     0,    0,          True,          32),
-                        "i586":       (   3,     0,    0,          True,          32),
-                        "i686":       (   3,     0,    0,          True,          32),
-                        "x86_64":     (  62,     0,    0,          True,          64),
-                        "mips":       (   8,     0,    0,          False,         32),
-                        "mipsel":     (   8,     0,    0,          True,          32),
-                        "mips64":     (   8,     0,    0,          False,         64),
-                        "mips64el":   (   8,     0,    0,          True,          64),
-                        "microblaze":  (189,     0,    0,          False,         32),
-                        "microblazeeb":(189,     0,    0,          False,         32),
-                        "microblazeel":(189,     0,    0,          True,          32),
-                        "riscv32":    (243,      0,    0,          True,          32),
-                        "riscv64":    (243,      0,    0,          True,          64),
-                        "sh4":        (  42,     0,    0,          True,          32),
-                      },
-            "uclinux-uclibc" : {
-                        "bfin":       ( 106,     0,    0,          True,         32),
-                      }, 
-            "linux-gnueabi" : {
-                        "arm" :       (40,     0,    0,          True,          32),
-                        "armeb" :     (40,     0,    0,          False,         32),
-                      },
-            "linux-musleabi" : {
-                        "arm" :       (40,     0,    0,          True,          32),
-                        "armeb" :     (40,     0,    0,          False,         32),
-                      },
-            "linux-gnuspe" : {
-                        "powerpc":    (20,     0,    0,          False,         32),
-                      },
-            "linux-muslspe" : {
-                        "powerpc":    (20,     0,    0,          False,         32),
-                      },
-            "linux-gnu" :       {
-                        "powerpc":    (20,     0,    0,          False,         32),
-                        "sh4":        (42,     0,    0,          True,          32),
-                      },
-            "linux-gnu_ilp32" :     {
-                        "aarch64" :   (183,    0,    0,          True,          32),
-                      },
-            "linux-gnux32" :       {
-                        "x86_64":     (62,     0,    0,          True,          32),
-                      },
-            "linux-muslx32" :       {
-                        "x86_64":     (62,     0,    0,          True,          32),
-                      },
-            "linux-gnun32" :       {
-                        "mips64":       ( 8,     0,    0,          False,         32),
-                        "mips64el":     ( 8,     0,    0,          True,          32),
-                        "mipsisa64r6":  ( 8,     0,    0,          False,         32),
-                        "mipsisa64r6el":( 8,     0,    0,          True,          32),
-                      },
-        }
-
-    # Add in any extra user supplied data which may come from a BSP layer, removing the
-    # need to always change this class directly
-    extra_machdata = (d.getVar("PACKAGEQA_EXTRA_MACHDEFFUNCS") or "").split()
-    for m in extra_machdata:
-        call = m + "(machdata, d)"
-        locs = { "machdata" : machdata, "d" : d}
-        machdata = bb.utils.better_eval(call, locs)
-
-    return machdata
-
-
 def package_qa_clean_path(path, d, pkg=None):
     """
     Remove redundant paths from the path for display.  If pkg isn't set then
@@ -403,7 +270,7 @@
     """
     Check if archs are compatible
     """
-    import re
+    import re, oe.elf
 
     if not elf:
         return
@@ -430,12 +297,14 @@
 
     # If this throws an exception, fix the dict above
     (machine, osabi, abiversion, littleendian, bits) \
-        = package_qa_get_machine_dict(d)[target_os][target_arch]
+        = oe.elf.machine_dict(d)[target_os][target_arch]
 
     # Check the architecture and endianness of the binary
     is_32 = (("virtual/kernel" in provides) or bb.data.inherits_class("module", d)) and \
-            (target_os == "linux-gnux32" or target_os == "linux-muslx32" or target_os == "linux-gnu_ilp32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
-    if not ((machine == elf.machine()) or is_32):
+            (target_os == "linux-gnux32" or target_os == "linux-muslx32" or \
+            target_os == "linux-gnu_ilp32" or re.match('mips64.*32', d.getVar('DEFAULTTUNE')))
+    is_bpf = (oe.qa.elf_machine_to_string(elf.machine()) == "BPF")
+    if not ((machine == elf.machine()) or is_32 or is_bpf):
         package_qa_add_message(messages, "arch", "Architecture did not match (%s, expected %s) on %s" % \
                  (oe.qa.elf_machine_to_string(elf.machine()), oe.qa.elf_machine_to_string(machine), package_qa_clean_path(path,d)))
     elif not ((bits == elf.abiSize()) or is_32):
@@ -599,7 +468,7 @@
         return
 
     if not lic_files and d.getVar('SRC_URI'):
-        sane = package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
+        sane &= package_qa_handle_error("license-checksum", pn + ": Recipe file fetches files and does not have license file information (LIC_FILES_CHKSUM)", d)
 
     srcdir = d.getVar('S')
     corebase_licensefile = d.getVar('COREBASE') + "/LICENSE"
@@ -607,11 +476,11 @@
         try:
             (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
         except bb.fetch.MalformedUrl:
-            sane = package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+            sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
             continue
         srclicfile = os.path.join(srcdir, path)
         if not os.path.isfile(srclicfile):
-            package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+            sane &= package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
             continue
 
         if (srclicfile == corebase_licensefile):
@@ -695,7 +564,7 @@
             else:
                 msg = pn + ": LIC_FILES_CHKSUM is not specified for " +  url
                 msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
-            sane = package_qa_handle_error("license-checksum", msg, d)
+            sane &= package_qa_handle_error("license-checksum", msg, d)
 
     if not sane:
         bb.fatal("Fatal QA errors found, failing task.")
@@ -732,14 +601,14 @@
                     file_content = file_content.replace(recipesysroot, "")
                     if workdir in file_content:
                         error_msg = "%s failed sanity test (workdir) in path %s" % (file,root)
-                        sane = package_qa_handle_error("la", error_msg, d)
+                        sane &= package_qa_handle_error("la", error_msg, d)
             elif file.endswith(".pc"):
                 with open(path) as f:
                     file_content = f.read()
                     file_content = file_content.replace(recipesysroot, "")
                     if pkgconfigcheck in file_content:
                         error_msg = "%s failed sanity test (tmpdir) in path %s" % (file,root)
-                        sane = package_qa_handle_error("pkgconfig", error_msg, d)
+                        sane &= package_qa_handle_error("pkgconfig", error_msg, d)
 
     return sane
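
The machine table removed above moves out of this class (the lookup below now
calls oe.elf.machine_dict(d)), and the removed hook code shows the extension
convention: each function named in PACKAGEQA_EXTRA_MACHDEFFUNCS is called as
m(machdata, d) and must return the (possibly extended) dictionary. A minimal
sketch of such a BSP-layer hook; the function name, architecture name and ELF
machine number are invented for illustration:

    # Hypothetical hook; a layer would enable it with
    #   PACKAGEQA_EXTRA_MACHDEFFUNCS += "mybsp_update_machdata"
    def mybsp_update_machdata(machdata, d):
        # 0x1234 stands in for the real ELF e_machine value of "mycpu"
        machdata.setdefault("linux", {})["mycpu"] = (0x1234, 0, 0, True, 32)
        return machdata
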
 
diff --git a/poky/meta/classes/kernel-arch.bbclass b/poky/meta/classes/kernel-arch.bbclass
index 09793fc..07ec242 100644
--- a/poky/meta/classes/kernel-arch.bbclass
+++ b/poky/meta/classes/kernel-arch.bbclass
@@ -22,12 +22,14 @@
     valid_archs = d.getVar('valid_archs').split()
 
     if   re.match('(i.86|athlon|x86.64)$', a):  return 'x86'
+    elif re.match('arceb$', a):                 return 'arc'
     elif re.match('armeb$', a):                 return 'arm'
     elif re.match('aarch64$', a):               return 'arm64'
     elif re.match('aarch64_be$', a):            return 'arm64'
     elif re.match('aarch64_ilp32$', a):         return 'arm64'
     elif re.match('aarch64_be_ilp32$', a):      return 'arm64'
     elif re.match('mips(isa|)(32|64|)(r6|)(el|)$', a):      return 'mips'
+    elif re.match('mcf', a):                    return 'm68k'
     elif re.match('riscv(32|64|)(eb|)$', a):    return 'riscv'
     elif re.match('p(pc|owerpc)(|64)', a):      return 'powerpc'
     elif re.match('sh(3|4)$', a):               return 'sh'
diff --git a/poky/meta/classes/kernel-artifact-names.bbclass b/poky/meta/classes/kernel-artifact-names.bbclass
new file mode 100644
index 0000000..bbeecba
--- /dev/null
+++ b/poky/meta/classes/kernel-artifact-names.bbclass
@@ -0,0 +1,18 @@
+KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}"
+KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}"
+
+KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+
+KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+
+KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+
+MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}"
+MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}"
+MODULE_TARBALL_DEPLOY ?= "1"
+
+INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}"
+INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}"
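
For reference, with hypothetical values PKGE=1, PKGV=4.18.26, PKGR=r0,
MACHINE=qemux86 and IMAGE_VERSION_SUFFIX left at its usual -${DATETIME}
default, these defaults expand to names such as:

    KERNEL_IMAGE_NAME      -> 1-4.18.26-r0-qemux86-20190101120000
    KERNEL_IMAGE_LINK_NAME -> qemux86
    INITRAMFS_NAME         -> initramfs-1-4.18.26-r0-qemux86-20190101120000
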
diff --git a/poky/meta/classes/kernel-devicetree.bbclass b/poky/meta/classes/kernel-devicetree.bbclass
index 4f80cc6..867b776 100644
--- a/poky/meta/classes/kernel-devicetree.bbclass
+++ b/poky/meta/classes/kernel-devicetree.bbclass
@@ -10,21 +10,21 @@
 KERNEL_DEVICETREE_BUNDLE ?= "0"
 
 normalize_dtb () {
-	DTB="$1"
-	if echo ${DTB} | grep -q '/dts/'; then
-		bbwarn "${DTB} contains the full path to the the dts file, but only the dtb name should be used."
-		DTB=`basename ${DTB} | sed 's,\.dts$,.dtb,g'`
+	dtb="$1"
+	if echo $dtb | grep -q '/dts/'; then
+		bbwarn "$dtb contains the full path to the dts file, but only the dtb name should be used."
+		dtb=`basename $dtb | sed 's,\.dts$,.dtb,g'`
 	fi
-	echo "${DTB}"
+	echo "$dtb"
 }
 
 get_real_dtb_path_in_kernel () {
-	DTB="$1"
-	DTB_PATH="${B}/arch/${ARCH}/boot/dts/${DTB}"
-	if [ ! -e "${DTB_PATH}" ]; then
-		DTB_PATH="${B}/arch/${ARCH}/boot/${DTB}"
+	dtb="$1"
+	dtb_path="${B}/arch/${ARCH}/boot/dts/$dtb"
+	if [ ! -e "$dtb_path" ]; then
+		dtb_path="${B}/arch/${ARCH}/boot/$dtb"
 	fi
-	echo "${DTB_PATH}"
+	echo "$dtb_path"
 }
 
 do_configure_append() {
@@ -50,61 +50,44 @@
 }
 
 do_compile_append() {
-	for DTB in ${KERNEL_DEVICETREE}; do
-		DTB=`normalize_dtb "${DTB}"`
-		oe_runmake ${DTB}
+	for dtbf in ${KERNEL_DEVICETREE}; do
+		dtb=`normalize_dtb "$dtbf"`
+		oe_runmake $dtb
 	done
 }
 
 do_install_append() {
-	for DTB in ${KERNEL_DEVICETREE}; do
-		DTB=`normalize_dtb "${DTB}"`
-		DTB_EXT=${DTB##*.}
-		DTB_PATH=`get_real_dtb_path_in_kernel "${DTB}"`
-		DTB_BASE_NAME=`basename ${DTB} ."${DTB_EXT}"`
-		install -m 0644 ${DTB_PATH} ${D}/${KERNEL_IMAGEDEST}/${DTB_BASE_NAME}.${DTB_EXT}
-		for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
-			symlink_name=${type}"-"${KERNEL_IMAGE_SYMLINK_NAME}
-			DTB_SYMLINK_NAME=`echo ${symlink_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"`
-			ln -sf ${DTB_BASE_NAME}.${DTB_EXT} ${D}/${KERNEL_IMAGEDEST}/devicetree-${DTB_SYMLINK_NAME}.${DTB_EXT}
-
-			if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
-				cat ${D}/${KERNEL_IMAGEDEST}/$type \
-					${D}/${KERNEL_IMAGEDEST}/${DTB_BASE_NAME}.${DTB_EXT} \
-					> ${D}/${KERNEL_IMAGEDEST}/$type-${DTB_BASE_NAME}.${DTB_EXT}.bin
-			fi
-		done
+	for dtbf in ${KERNEL_DEVICETREE}; do
+		dtb=`normalize_dtb "$dtbf"`
+		dtb_ext=${dtb##*.}
+		dtb_base_name=`basename $dtb .$dtb_ext`
+		dtb_path=`get_real_dtb_path_in_kernel "$dtb"`
+		install -m 0644 $dtb_path ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext
 	done
 }
 
 do_deploy_append() {
-	for DTB in ${KERNEL_DEVICETREE}; do
-		DTB=`normalize_dtb "${DTB}"`
-		DTB_EXT=${DTB##*.}
-		DTB_BASE_NAME=`basename ${DTB} ."${DTB_EXT}"`
+	for dtbf in ${KERNEL_DEVICETREE}; do
+		dtb=`normalize_dtb "$dtbf"`
+		dtb_ext=${dtb##*.}
+		dtb_base_name=`basename $dtb .$dtb_ext`
+		install -d ${DEPLOYDIR}
+		install -m 0644 ${D}/${KERNEL_IMAGEDEST}/$dtb_base_name.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext
+		ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name.$dtb_ext
+		ln -sf $dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext ${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext
 		for type in ${KERNEL_IMAGETYPE_FOR_MAKE}; do
-			base_name=${type}"-"${KERNEL_IMAGE_BASE_NAME}
-			symlink_name=${type}"-"${KERNEL_IMAGE_SYMLINK_NAME}
-			DTB_NAME=`echo ${base_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"`
-			DTB_SYMLINK_NAME=`echo ${symlink_name} | sed "s/${MACHINE}/${DTB_BASE_NAME}/g"`
-			DTB_PATH=`get_real_dtb_path_in_kernel "${DTB}"`
-			install -d ${DEPLOYDIR}
-			install -m 0644 ${DTB_PATH} ${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT}
-			ln -sf ${DTB_NAME}.${DTB_EXT} ${DEPLOYDIR}/${DTB_SYMLINK_NAME}.${DTB_EXT}
-			ln -sf ${DTB_NAME}.${DTB_EXT} ${DEPLOYDIR}/${DTB_BASE_NAME}.${DTB_EXT}
-
 			if [ "$type" = "zImage" ] && [ "${KERNEL_DEVICETREE_BUNDLE}" = "1" ]; then
-				cat ${DEPLOYDIR}/$type \
-					${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT} \
-					> ${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT}.bin
-				ln -sf ${DTB_NAME}.${DTB_EXT}.bin ${DEPLOYDIR}/$type-${DTB_BASE_NAME}.${DTB_EXT}.bin
-
+				cat ${D}/${KERNEL_IMAGEDEST}/$type \
+					${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+					> ${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+				ln -sf $type-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
+					${DEPLOYDIR}/$type-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
 				if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
 					cat ${KERNEL_OUTPUT_DIR}/${type}.initramfs \
-						${DEPLOYDIR}/${DTB_NAME}.${DTB_EXT} \
-						> ${DEPLOYDIR}/${type}-${INITRAMFS_BASE_NAME}-${DTB_BASE_NAME}.${DTB_EXT}.bin
-					ln -sf ${type}-${INITRAMFS_BASE_NAME}-${DTB_BASE_NAME}.${DTB_EXT}.bin \
-					       ${DEPLOYDIR}/${type}-initramfs-${DTB_BASE_NAME}.${DTB_EXT}-${MACHINE}.bin
+						${DEPLOYDIR}/$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext \
+						> ${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin
+					ln -sf ${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_NAME}.$dtb_ext.bin \
+						${DEPLOYDIR}/${type}-${INITRAMFS_NAME}-$dtb_base_name-${KERNEL_DTB_LINK_NAME}.$dtb_ext.bin
 				fi
 			fi
 		done
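
With the new variables, a hypothetical KERNEL_DEVICETREE entry such as
am335x-bone.dtb is deployed by the do_deploy_append above as:

    ${DEPLOYDIR}/am335x-bone-${KERNEL_DTB_NAME}.dtb        (the file)
    ${DEPLOYDIR}/am335x-bone.dtb                           (symlink)
    ${DEPLOYDIR}/am335x-bone-${KERNEL_DTB_LINK_NAME}.dtb   (symlink)
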
diff --git a/poky/meta/classes/kernel-fitimage.bbclass b/poky/meta/classes/kernel-fitimage.bbclass
index 50a91e1..4c4fd99 100644
--- a/poky/meta/classes/kernel-fitimage.bbclass
+++ b/poky/meta/classes/kernel-fitimage.bbclass
@@ -1,4 +1,4 @@
-inherit kernel-uboot uboot-sign
+inherit kernel-uboot kernel-artifact-names uboot-sign
 
 python __anonymous () {
     kerneltypes = d.getVar('KERNEL_IMAGETYPES') or ""
@@ -135,6 +135,15 @@
 
 	dtb_csum="sha1"
 
+	dtb_loadline=""
+	dtb_ext=${DTB##*.}
+	if [ "${dtb_ext}" = "dtbo" ]; then
+		if [ -n "${UBOOT_DTBO_LOADADDRESS}" ]; then
+			dtb_loadline="load = <${UBOOT_DTBO_LOADADDRESS}>;"
+		fi
+	elif [ -n "${UBOOT_DTB_LOADADDRESS}" ]; then
+		dtb_loadline="load = <${UBOOT_DTB_LOADADDRESS}>;"
+	fi
 	cat << EOF >> ${1}
                 fdt@${2} {
                         description = "Flattened Device Tree blob";
@@ -142,6 +151,7 @@
                         type = "flat_dt";
                         arch = "${UBOOT_ARCH}";
                         compression = "none";
+                        ${dtb_loadline}
                         hash@1 {
                                 algo = "${dtb_csum}";
                         };
@@ -248,25 +258,34 @@
 	fi
 
 	# Test if we have any DTBs at all
-	conf_desc="Linux kernel"
-	kernel_line="kernel = \"kernel@${2}\";"
+	sep=""
+	conf_desc=""
+	kernel_line=""
 	fdt_line=""
 	ramdisk_line=""
 	setup_line=""
 	default_line=""
 
+	if [ -n "${2}" ]; then
+		conf_desc="Linux kernel"
+		sep=", "
+		kernel_line="kernel = \"kernel@${2}\";"
+	fi
+
 	if [ -n "${3}" ]; then
-		conf_desc="${conf_desc}, FDT blob"
+		conf_desc="${conf_desc}${sep}FDT blob"
+		sep=", "
 		fdt_line="fdt = \"fdt@${3}\";"
 	fi
 
 	if [ -n "${4}" ]; then
-		conf_desc="${conf_desc}, ramdisk"
+		conf_desc="${conf_desc}${sep}ramdisk"
+		sep=", "
 		ramdisk_line="ramdisk = \"ramdisk@${4}\";"
 	fi
 
 	if [ -n "${5}" ]; then
-		conf_desc="${conf_desc}, setup"
+		conf_desc="${conf_desc}${sep}setup"
 		setup_line="setup = \"setup@${5}\";"
 	fi
 
@@ -289,18 +308,26 @@
 
 	if [ ! -z "${conf_sign_keyname}" ] ; then
 
-		sign_line="sign-images = \"kernel\""
+		sign_line="sign-images = "
+		sep=""
+
+		if [ -n "${2}" ]; then
+			sign_line="${sign_line}${sep}\"kernel\""
+			sep=", "
+		fi
 
 		if [ -n "${3}" ]; then
-			sign_line="${sign_line}, \"fdt\""
+			sign_line="${sign_line}${sep}\"fdt\""
+			sep=", "
 		fi
 
 		if [ -n "${4}" ]; then
-			sign_line="${sign_line}, \"ramdisk\""
+			sign_line="${sign_line}${sep}\"ramdisk\""
+			sep=", "
 		fi
 
 		if [ -n "${5}" ]; then
-			sign_line="${sign_line}, \"setup\""
+			sign_line="${sign_line}${sep}\"setup\""
 		fi
 
 		sign_line="${sign_line};"
@@ -377,7 +404,7 @@
 	#
 	if [ "x${ramdiskcount}" = "x1" ] ; then
 		# Find and use the first initramfs image archive type we find
-		for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz cpio; do
+		for img in cpio.lz4 cpio.lzo cpio.lzma cpio.xz cpio.gz ext2.gz cpio; do
 			initramfs_path="${DEPLOY_DIR_IMAGE}/${INITRAMFS_IMAGE_NAME}.${img}"
 			echo "Using $initramfs_path"
 			if [ -e "${initramfs_path}" ]; then
@@ -403,7 +430,12 @@
 	if [ -n "${DTBS}" ]; then
 		i=1
 		for DTB in ${DTBS}; do
-			fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+			dtb_ext=${DTB##*.}
+			if [ "${dtb_ext}" = "dtbo" ]; then
+				fitimage_emit_section_config ${1} "" "${DTB}" "" "" "`expr ${i} = ${dtbcount}`"
+			else
+				fitimage_emit_section_config ${1} "${kernelcount}" "${DTB}" "${ramdiskcount}" "${setupcount}" "`expr ${i} = ${dtbcount}`"
+			fi
 			i=`expr ${i} + 1`
 		done
 	fi
@@ -427,7 +459,7 @@
 		uboot-mkimage \
 			${@'-D "${UBOOT_MKIMAGE_DTCOPTS}"' if len('${UBOOT_MKIMAGE_DTCOPTS}') else ''} \
 			-F -k "${UBOOT_SIGN_KEYDIR}" \
-			-K "${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_BINARY}" \
+			${@'-K "${DEPLOY_DIR_IMAGE}/${UBOOT_DTB_BINARY}"' if len('${UBOOT_DTB_BINARY}') else ''} \
 			-r arch/${ARCH}/boot/${2}
 	fi
 }
@@ -456,32 +488,22 @@
 kernel_do_deploy_append() {
 	# Update deploy directory
 	if echo ${KERNEL_IMAGETYPES} | grep -wq "fitImage"; then
-		cd ${B}
 		echo "Copying fit-image.its source file..."
-		its_base_name="fitImage-its-${PV}-${PR}-${MACHINE}-${DATETIME}"
-		its_symlink_name=fitImage-its-${MACHINE}
-		install -m 0644 fit-image.its ${DEPLOYDIR}/${its_base_name}.its
-		linux_bin_base_name="fitImage-linux.bin-${PV}-${PR}-${MACHINE}-${DATETIME}"
-		linux_bin_symlink_name=fitImage-linux.bin-${MACHINE}
-		install -m 0644 linux.bin ${DEPLOYDIR}/${linux_bin_base_name}.bin
+		install -m 0644 ${B}/fit-image.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_NAME}.its
+		ln -snf fitImage-its-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${KERNEL_FIT_LINK_NAME}
+
+		echo "Copying linux.bin file..."
+		install -m 0644 ${B}/linux.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_NAME}.bin
+		ln -snf fitImage-linux.bin-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-linux.bin-${KERNEL_FIT_LINK_NAME}
 
 		if [ -n "${INITRAMFS_IMAGE}" ]; then
 			echo "Copying fit-image-${INITRAMFS_IMAGE}.its source file..."
-			its_initramfs_base_name="fitImage-its-${INITRAMFS_IMAGE_NAME}-${PV}-${PR}-${DATETIME}"
-			its_initramfs_symlink_name=fitImage-its-${INITRAMFS_IMAGE_NAME}
-			install -m 0644 fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/${its_initramfs_base_name}.its
-			fit_initramfs_base_name="fitImage-${INITRAMFS_IMAGE_NAME}-${PV}-${PR}-${DATETIME}"
-			fit_initramfs_symlink_name=fitImage-${INITRAMFS_IMAGE_NAME}
-			install -m 0644 arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/${fit_initramfs_base_name}.bin
-		fi
+			install -m 0644 ${B}/fit-image-${INITRAMFS_IMAGE}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its
+			ln -snf fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.its ${DEPLOYDIR}/fitImage-its-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
 
-		cd ${DEPLOYDIR}
-		ln -sf ${its_base_name}.its ${its_symlink_name}.its
-		ln -sf ${linux_bin_base_name}.bin ${linux_bin_symlink_name}.bin
-
-		if [ -n "${INITRAMFS_IMAGE}" ]; then
-			ln -sf ${its_initramfs_base_name}.its ${its_initramfs_symlink_name}.its
-			ln -sf ${fit_initramfs_base_name}.bin ${fit_initramfs_symlink_name}.bin
+			echo "Copying fitImage-${INITRAMFS_IMAGE} file..."
+			install -m 0644 ${B}/arch/${ARCH}/boot/fitImage-${INITRAMFS_IMAGE} ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin
+			ln -snf fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_NAME}.bin ${DEPLOYDIR}/fitImage-${INITRAMFS_IMAGE_NAME}-${KERNEL_FIT_LINK_NAME}
 		fi
 	fi
 }
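
For illustration, assuming a .dtbo with a hypothetical
UBOOT_DTBO_LOADADDRESS = "0x87f00000" and UBOOT_ARCH = "arm", the fdt section
emitted above now carries a load line, while the dtbo branch added to
fitimage_assemble emits a configuration node that references only the overlay
(no kernel, ramdisk or setup sections):

    fdt@1 {
            description = "Flattened Device Tree blob";
            ...
            type = "flat_dt";
            arch = "arm";
            compression = "none";
            load = <0x87f00000>;
            hash@1 {
                    algo = "sha1";
            };
    };
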
diff --git a/poky/meta/classes/kernel-uimage.bbclass b/poky/meta/classes/kernel-uimage.bbclass
index 1d8656e..c2de6bb 100644
--- a/poky/meta/classes/kernel-uimage.bbclass
+++ b/poky/meta/classes/kernel-uimage.bbclass
@@ -1,7 +1,7 @@
 inherit kernel-uboot
 
 python __anonymous () {
-    if "uImage" in (d.getVar('KERNEL_IMAGETYPES') or "").split():
+    if "uImage" in d.getVar('KERNEL_IMAGETYPES'):
         depends = d.getVar("DEPENDS")
         depends = "%s u-boot-mkimage-native" % depends
         d.setVar("DEPENDS", depends)
diff --git a/poky/meta/classes/kernel-yocto.bbclass b/poky/meta/classes/kernel-yocto.bbclass
index 82d8074..496c8a7 100644
--- a/poky/meta/classes/kernel-yocto.bbclass
+++ b/poky/meta/classes/kernel-yocto.bbclass
@@ -140,6 +140,8 @@
 			includes="$includes -I${WORKDIR}/$f/kernel-meta"
 	        elif [ -d "${WORKDIR}/$f" ]; then
 			includes="$includes -I${WORKDIR}/$f"
+		elif [ -d "${WORKDIR}/../oe-local-files/$f" ]; then
+			includes="$includes -I${WORKDIR}/../oe-local-files/$f"
 		fi
 	done
 	for s in ${sccs} ${patches}; do
@@ -319,7 +321,7 @@
 addtask kernel_configme before do_configure after do_patch
 
 python do_kernel_configcheck() {
-    import re, string, sys
+    import re, string, sys, subprocess
 
     # if KMETA isn't set globally by a recipe using this routine, we need to
     # set the default to 'meta'. Otherwise, kconf_check is not passed a valid
@@ -328,13 +330,24 @@
     if not os.path.exists(kmeta):
         kmeta = "." + kmeta
 
-    pathprefix = "export PATH=%s:%s; " % (d.getVar('PATH'), "${S}/scripts/util/")
+    s = d.getVar('S')
 
-    cmd = d.expand("scc --configs -o ${S}/.kernel-meta")
-    ret, configs = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
+    env = os.environ.copy()
+    env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/")
 
-    cmd = d.expand("cd ${S}; kconf_check --report -o ${S}/%s/cfg/ ${B}/.config ${S} %s" % (kmeta,configs))
-    ret, result = oe.utils.getstatusoutput("%s%s" % (pathprefix, cmd))
+    try:
+        configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8')
+    except subprocess.CalledProcessError as e:
+        bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") )
+
+    try:
+        subprocess.check_call(['kconf_check', '--report', '-o',
+                '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env)
+    except subprocess.CalledProcessError:
+        # The configuration gathering can return different exit codes, but
+        # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch
+        # everything here, and let the run continue.
+        pass
 
     config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0)
     bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0)
@@ -346,6 +359,27 @@
             with open (mismatch_file, "r") as myfile:
                 results = myfile.read()
                 bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results)
+
+    if bsp_check_visibility:
+        invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta)
+        if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0:
+            with open (invalid_file, "r") as myfile:
+                results = myfile.read()
+                bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results)
+        errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta)
+        if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0:
+            with open (errors_file, "r") as myfile:
+               results = myfile.read()
+               bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results)
+
+    # if the audit level is greater than two, we report if a fragment has overridden
+    # a value from a base fragment. This is really only used for new kernel introduction
+    if bsp_check_visibility > 2:
+        redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta)
+        if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0:
+            with open (redefinition_file, "r") as myfile:
+                results = myfile.read()
+                bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results)
 }
 
 # Ensure that the branches (BSP and meta) are on the locations specified by
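
As a usage sketch (variable semantics taken from the checks above; the values
are illustrative), a BSP developer can raise the audit verbosity in
local.conf:

    KCONF_AUDIT_LEVEL = "1"      # warn when specified values missed the final .config
    KCONF_BSP_AUDIT_LEVEL = "3"  # also report invalid options and fragment
                                 # errors; > 2 adds redefinition reports
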
diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass
index 7278514..e04d2fe 100644
--- a/poky/meta/classes/kernel.bbclass
+++ b/poky/meta/classes/kernel.bbclass
@@ -62,31 +62,36 @@
     type = d.getVar('KERNEL_IMAGETYPE') or ""
     alttype = d.getVar('KERNEL_ALT_IMAGETYPE') or ""
     types = d.getVar('KERNEL_IMAGETYPES') or ""
-    kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
     if type not in types.split():
         types = (type + ' ' + types).strip()
     if alttype not in types.split():
         types = (alttype + ' ' + types).strip()
     d.setVar('KERNEL_IMAGETYPES', types)
 
-    # some commonly used kernel images aren't generated by the kernel build system, such as vmlinux.gz
-    # typeformake lists only valid kernel make targets, and post processing can be done after the kernel
-    # is built (such as using gzip to compress vmlinux)
-    typeformake = types.replace('vmlinux.gz', 'vmlinux')
-    d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', typeformake)
+    # KERNEL_IMAGETYPES may contain a mixture of image types supported directly
+    # by the kernel build system and types which are created by post-processing
+    # the output of the kernel build system (e.g. compressing vmlinux ->
+    # vmlinux.gz in kernel_do_compile()).
+    # KERNEL_IMAGETYPE_FOR_MAKE should contain only image types supported
+    # directly by the kernel build system.
+    if not d.getVar('KERNEL_IMAGETYPE_FOR_MAKE'):
+        typeformake = set()
+        for type in types.split():
+            if type == 'vmlinux.gz':
+                type = 'vmlinux'
+            typeformake.add(type)
+
+        d.setVar('KERNEL_IMAGETYPE_FOR_MAKE', ' '.join(sorted(typeformake)))
+
+    kname = d.getVar('KERNEL_PACKAGE_NAME') or "kernel"
+    imagedest = d.getVar('KERNEL_IMAGEDEST')
 
     for type in types.split():
         typelower = type.lower()
-        imagedest = d.getVar('KERNEL_IMAGEDEST')
-
         d.appendVar('PACKAGES', ' %s-image-%s' % (kname, typelower))
-
         d.setVar('FILES_' + kname + '-image-' + typelower, '/' + imagedest + '/' + type + '-${KERNEL_VERSION_NAME}' + ' /' + imagedest + '/' + type)
-
         d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower))
-
         d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower))
-
         d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1')
 
     image = d.getVar('INITRAMFS_IMAGE')
@@ -159,7 +164,7 @@
 
 # The directory where built kernel lies in the kernel tree
 KERNEL_OUTPUT_DIR ?= "arch/${ARCH}/boot"
-KERNEL_IMAGEDEST = "boot"
+KERNEL_IMAGEDEST ?= "boot"
 
 #
 # configuration
@@ -224,39 +229,37 @@
 	echo "Finished copy of initramfs into ./usr"
 }
 
-INITRAMFS_BASE_NAME ?= "initramfs-${PV}-${PR}-${MACHINE}-${DATETIME}"
-INITRAMFS_BASE_NAME[vardepsexclude] = "DATETIME"
 do_bundle_initramfs () {
 	if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
 		echo "Creating a kernel image with a bundled initramfs..."
 		copy_initramfs
 		# Backing up kernel image relies on its type(regular file or symbolic link)
 		tmp_path=""
-		for type in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
-			if [ -h ${KERNEL_OUTPUT_DIR}/$type ] ; then
-				linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$type`
-				realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$type`
+		for imageType in ${KERNEL_IMAGETYPE_FOR_MAKE} ; do
+			if [ -h ${KERNEL_OUTPUT_DIR}/$imageType ] ; then
+				linkpath=`readlink -n ${KERNEL_OUTPUT_DIR}/$imageType`
+				realpath=`readlink -fn ${KERNEL_OUTPUT_DIR}/$imageType`
 				mv -f $realpath $realpath.bak
-				tmp_path=$tmp_path" "$type"#"$linkpath"#"$realpath
-			elif [ -f ${KERNEL_OUTPUT_DIR}/$type ]; then
-				mv -f ${KERNEL_OUTPUT_DIR}/$type ${KERNEL_OUTPUT_DIR}/$type.bak
-				tmp_path=$tmp_path" "$type"##"
+				tmp_path=$tmp_path" "$imageType"#"$linkpath"#"$realpath
+			elif [ -f ${KERNEL_OUTPUT_DIR}/$imageType ]; then
+				mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.bak
+				tmp_path=$tmp_path" "$imageType"##"
 			fi
 		done
 		use_alternate_initrd=CONFIG_INITRAMFS_SOURCE=${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio
 		kernel_do_compile
 		# Restoring kernel image
 		for tp in $tmp_path ; do
-			type=`echo $tp|cut -d "#" -f 1`
+			imageType=`echo $tp|cut -d "#" -f 1`
 			linkpath=`echo $tp|cut -d "#" -f 2`
 			realpath=`echo $tp|cut -d "#" -f 3`
 			if [ -n "$realpath" ]; then
 				mv -f $realpath $realpath.initramfs
 				mv -f $realpath.bak $realpath
-				ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$type.initramfs
+				ln -sf $linkpath.initramfs ${B}/${KERNEL_OUTPUT_DIR}/$imageType.initramfs
 			else
-				mv -f ${KERNEL_OUTPUT_DIR}/$type ${KERNEL_OUTPUT_DIR}/$type.initramfs
-				mv -f ${KERNEL_OUTPUT_DIR}/$type.bak ${KERNEL_OUTPUT_DIR}/$type
+				mv -f ${KERNEL_OUTPUT_DIR}/$imageType ${KERNEL_OUTPUT_DIR}/$imageType.initramfs
+				mv -f ${KERNEL_OUTPUT_DIR}/$imageType.bak ${KERNEL_OUTPUT_DIR}/$imageType
 			fi
 		done
 	fi
@@ -283,7 +286,7 @@
 	if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then
 		# kernel sources do not use do_unpack, so SOURCE_DATE_EPOCH may not
 		# be set....
-		if [ "$SOURCE_DATE_EPOCH" = "0" ]; then
+		if [ "${SOURCE_DATE_EPOCH}" = "" -o "${SOURCE_DATE_EPOCH}" = "0" ]; then
 			olddir=`pwd`
 			cd ${S}
 			SOURCE_DATE_EPOCH=`git log  -1 --pretty=%ct`
@@ -362,10 +365,10 @@
 	#
 	install -d ${D}/${KERNEL_IMAGEDEST}
 	install -d ${D}/boot
-	for type in ${KERNEL_IMAGETYPES} ; do
-		install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} ${D}/${KERNEL_IMAGEDEST}/${type}-${KERNEL_VERSION}
+	for imageType in ${KERNEL_IMAGETYPES} ; do
+		install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION}
 		if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then
-			ln -sf ${type}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${type}
+			ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType}
 		fi
 	done
 	install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION}
@@ -637,10 +640,10 @@
 			die "Invalid KERNEL_IMAGE_MAXSIZE: ${KERNEL_IMAGE_MAXSIZE}, should be an integer (The unit is Kbytes)"
 		fi
 		at_least_one_fits=
-		for type in ${KERNEL_IMAGETYPES} ; do
-			size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$type | awk '{print $1}'`
+		for imageType in ${KERNEL_IMAGETYPES} ; do
+			size=`du -ks ${B}/${KERNEL_OUTPUT_DIR}/$imageType | awk '{print $1}'`
 			if [ $size -ge ${KERNEL_IMAGE_MAXSIZE} ]; then
-				bbwarn "This kernel $type (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
+				bbwarn "This kernel $imageType (size=$size(K) > ${KERNEL_IMAGE_MAXSIZE}(K)) is too big for your device."
 			else
 				at_least_one_fits=y
 			fi
@@ -654,16 +657,7 @@
 
 addtask sizecheck before do_install after do_strip
 
-KERNEL_IMAGE_BASE_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
-# Don't include the DATETIME variable in the sstate package signatures
-KERNEL_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
-KERNEL_IMAGE_SYMLINK_NAME ?= "${MACHINE}"
-MODULE_IMAGE_BASE_NAME ?= "modules-${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}"
-MODULE_IMAGE_BASE_NAME[vardepsexclude] = "DATETIME"
-MODULE_TARBALL_BASE_NAME ?= "${MODULE_IMAGE_BASE_NAME}.tgz"
-# Don't include the DATETIME variable in the sstate package signatures
-MODULE_TARBALL_SYMLINK_NAME ?= "modules-${MACHINE}.tgz"
-MODULE_TARBALL_DEPLOY ?= "1"
+inherit kernel-artifact-names
 
 kernel_do_deploy() {
 	deployDir="${DEPLOYDIR}"
@@ -672,34 +666,28 @@
 		mkdir "$deployDir"
 	fi
 
-	for type in ${KERNEL_IMAGETYPES} ; do
-		base_name=${type}-${KERNEL_IMAGE_BASE_NAME}
-		install -m 0644 ${KERNEL_OUTPUT_DIR}/${type} $deployDir/${base_name}.bin
+	for imageType in ${KERNEL_IMAGETYPES} ; do
+		base_name=${imageType}-${KERNEL_IMAGE_NAME}
+		install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin
+		symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME}
+		ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
+		ln -sf ${base_name}.bin $deployDir/${imageType}
 	done
+
 	if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then
-		mkdir -p ${D}/lib
-		tar -cvzf $deployDir/${MODULE_TARBALL_BASE_NAME} -C ${D} lib
-		ln -sf ${MODULE_TARBALL_BASE_NAME} $deployDir/${MODULE_TARBALL_SYMLINK_NAME}
+		mkdir -p ${D}${root_prefix}/lib
+		tar -cvzf $deployDir/modules-${MODULE_TARBALL_NAME}.tgz -C ${D}${root_prefix} lib
+		ln -sf modules-${MODULE_TARBALL_NAME}.tgz $deployDir/modules-${MODULE_TARBALL_LINK_NAME}.tgz
 	fi
 
-	for type in ${KERNEL_IMAGETYPES} ; do
-		base_name=${type}-${KERNEL_IMAGE_BASE_NAME}
-		symlink_name=${type}-${KERNEL_IMAGE_SYMLINK_NAME}
-		ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin
-		ln -sf ${base_name}.bin $deployDir/${type}
-	done
-
-	cd ${B}
-	# Update deploy directory
-	for type in ${KERNEL_IMAGETYPES} ; do
-		if [ -e "${KERNEL_OUTPUT_DIR}/${type}.initramfs" ]; then
-			echo "Copying deploy ${type} kernel-initramfs image and setting up links..."
-			initramfs_base_name=${type}-${INITRAMFS_BASE_NAME}
-			initramfs_symlink_name=${type}-initramfs-${MACHINE}
-			install -m 0644 ${KERNEL_OUTPUT_DIR}/${type}.initramfs $deployDir/${initramfs_base_name}.bin
+	if [ ! -z "${INITRAMFS_IMAGE}" -a x"${INITRAMFS_IMAGE_BUNDLE}" = x1 ]; then
+		for imageType in ${KERNEL_IMAGETYPES} ; do
+			initramfs_base_name=${imageType}-${INITRAMFS_NAME}
+			initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME}
+			install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin
 			ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin
-		fi
-	done
+		done
+	fi
 }
 do_deploy[cleandirs] = "${DEPLOYDIR}"
 do_deploy[dirs] = "${DEPLOYDIR} ${B}"
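
A worked example of the KERNEL_IMAGETYPE_FOR_MAKE computation in the
anonymous python hunk above, assuming a hypothetical recipe sets:

    KERNEL_IMAGETYPES         = "zImage vmlinux.gz"
    # vmlinux.gz is post-processed from vmlinux, so only the make-able
    # type is kept; the set is sorted before joining:
    KERNEL_IMAGETYPE_FOR_MAKE = "vmlinux zImage"
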
diff --git a/poky/meta/classes/libc-common.bbclass b/poky/meta/classes/libc-common.bbclass
index 9ea2c03..0e351b6 100644
--- a/poky/meta/classes/libc-common.bbclass
+++ b/poky/meta/classes/libc-common.bbclass
@@ -1,9 +1,5 @@
 do_install() {
 	oe_runmake install_root=${D} install
-	for r in ${rpcsvc}; do
-		h=`echo $r|sed -e's,\.x$,.h,'`
-		install -m 0644 ${S}/sunrpc/rpcsvc/$h ${D}/${includedir}/rpcsvc/
-	done
 	install -Dm 0644 ${WORKDIR}/etc/ld.so.conf ${D}/${sysconfdir}/ld.so.conf
 	install -d ${D}${localedir}
 	make -f ${WORKDIR}/generate-supported.mk IN="${S}/localedata/SUPPORTED" OUT="${WORKDIR}/SUPPORTED"
diff --git a/poky/meta/classes/libc-package.bbclass b/poky/meta/classes/libc-package.bbclass
index 2e7cd25..9d09c7b 100644
--- a/poky/meta/classes/libc-package.bbclass
+++ b/poky/meta/classes/libc-package.bbclass
@@ -257,6 +257,8 @@
                 "mipsisa32r6el":  " --uint32-align=4 --little-endian ", \
                 "mips64el":" --uint32-align=4 --little-endian ", \
                 "mipsisa64r6el":" --uint32-align=4 --little-endian ", \
+                "riscv64": " --uint32-align=4 --little-endian ", \
+                "riscv32": " --uint32-align=4 --little-endian ", \
                 "i586":    " --uint32-align=4 --little-endian ", \
                 "i686":    " --uint32-align=4 --little-endian ", \
                 "x86_64":  " --uint32-align=4 --little-endian "  }
diff --git a/poky/meta/classes/license.bbclass b/poky/meta/classes/license.bbclass
index 5103ed8..4cf7f07 100644
--- a/poky/meta/classes/license.bbclass
+++ b/poky/meta/classes/license.bbclass
@@ -16,284 +16,6 @@
 do_populate_lic[dirs] = "${LICSSTATEDIR}/${PN}"
 do_populate_lic[cleandirs] = "${LICSSTATEDIR}"
 
-python write_package_manifest() {
-    # Get list of installed packages
-    license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
-    bb.utils.mkdirhier(license_image_dir)
-    from oe.rootfs import image_list_installed_packages
-    from oe.utils import format_pkg_list
-
-    pkgs = image_list_installed_packages(d)
-    output = format_pkg_list(pkgs)
-    open(os.path.join(license_image_dir, 'package.manifest'),
-        'w+').write(output)
-}
-
-python write_deploy_manifest() {
-    license_deployed_manifest(d)
-}
-
-python license_create_manifest() {
-    import oe.packagedata
-    from oe.rootfs import image_list_installed_packages
-
-    build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
-    if build_images_from_feeds == "1":
-        return 0
-
-    pkg_dic = {}
-    for pkg in sorted(image_list_installed_packages(d)):
-        pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
-                                'runtime-reverse', pkg)
-        pkg_name = os.path.basename(os.readlink(pkg_info))
-
-        pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
-        if not "LICENSE" in pkg_dic[pkg_name].keys():
-            pkg_lic_name = "LICENSE_" + pkg_name
-            pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
-
-    rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
-                        d.getVar('IMAGE_NAME'), 'license.manifest')
-    write_license_files(d, rootfs_license_manifest, pkg_dic)
-}
-
-def write_license_files(d, license_manifest, pkg_dic):
-    import re
-
-    bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
-    bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
-    bad_licenses = expand_wildcard_licenses(d, bad_licenses)
-
-    with open(license_manifest, "w") as license_file:
-        for pkg in sorted(pkg_dic):
-            if bad_licenses:
-                try:
-                    (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
-                        oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
-                        bad_licenses, canonical_license, d)
-                except oe.license.LicenseError as exc:
-                    bb.fatal('%s: %s' % (d.getVar('P'), exc))
-            else:
-                pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
-                pkg_dic[pkg]["LICENSES"] = re.sub('  *', ' ', pkg_dic[pkg]["LICENSES"])
-                pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
-
-            if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
-                # Rootfs manifest
-                license_file.write("PACKAGE NAME: %s\n" % pkg)
-                license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
-                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
-                license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
-
-                # If the package doesn't contain any file, that is, its size is 0, the license
-                # isn't relevant as far as the final image is concerned. So doing license check
-                # doesn't make much sense, skip it.
-                if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
-                    continue
-            else:
-                # Image manifest
-                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
-                license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
-                license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
-                license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
-
-            for lic in pkg_dic[pkg]["LICENSES"]:
-                lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
-                                        pkg_dic[pkg]["PN"], "generic_%s" % 
-                                        re.sub('\+', '', lic))
-                # add explicity avoid of CLOSED license because isn't generic
-                if lic == "CLOSED":
-                   continue
-
-                if not os.path.exists(lic_file):
-                   bb.warn("The license listed %s was not in the "\ 
-                            "licenses collected for recipe %s" 
-                            % (lic, pkg_dic[pkg]["PN"]))
-
-    # Two options here:
-    # - Just copy the manifest
-    # - Copy the manifest and the license directories
-    # With both options set we see a .5 M increase in core-image-minimal
-    copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
-    copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
-    if copy_lic_manifest == "1":
-        rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'), 
-                                'usr', 'share', 'common-licenses')
-        bb.utils.mkdirhier(rootfs_license_dir)
-        rootfs_license_manifest = os.path.join(rootfs_license_dir,
-                os.path.split(license_manifest)[1])
-        if not os.path.exists(rootfs_license_manifest):
-            os.link(license_manifest, rootfs_license_manifest)
-
-        if copy_lic_dirs == "1":
-            for pkg in sorted(pkg_dic):
-                pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
-                bb.utils.mkdirhier(pkg_rootfs_license_dir)
-                pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
-                                            pkg_dic[pkg]["PN"]) 
-
-                pkg_manifest_licenses = [canonical_license(d, lic) \
-                        for lic in pkg_dic[pkg]["LICENSES"]]
-
-                licenses = os.listdir(pkg_license_dir)
-                for lic in licenses:
-                    rootfs_license = os.path.join(rootfs_license_dir, lic)
-                    pkg_license = os.path.join(pkg_license_dir, lic)
-                    pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
-
-                    if re.match("^generic_.*$", lic):
-                        generic_lic = canonical_license(d,
-                                re.search("^generic_(.*)$", lic).group(1))
-
-                        # Do not copy generic license into package if isn't
-                        # declared into LICENSES of the package.
-                        if not re.sub('\+$', '', generic_lic) in \
-                                [re.sub('\+', '', lic) for lic in \
-                                 pkg_manifest_licenses]:
-                            continue
-
-                        if oe.license.license_ok(generic_lic,
-                                bad_licenses) == False:
-                            continue
-
-                        if not os.path.exists(rootfs_license):
-                            os.link(pkg_license, rootfs_license)
-
-                        if not os.path.exists(pkg_rootfs_license):
-                            os.symlink(os.path.join('..', lic), pkg_rootfs_license)
-                    else:
-                        if (oe.license.license_ok(canonical_license(d,
-                                lic), bad_licenses) == False or
-                                os.path.exists(pkg_rootfs_license)):
-                            continue
-
-                        os.link(pkg_license, pkg_rootfs_license)
-
-
-def license_deployed_manifest(d):
-    """
-    Write the license manifest for the deployed recipes.
-    The deployed recipes usually includes the bootloader
-    and extra files to boot the target.
-    """
-
-    dep_dic = {}
-    man_dic = {}
-    lic_dir = d.getVar("LICENSE_DIRECTORY")
-
-    dep_dic = get_deployed_dependencies(d)
-    for dep in dep_dic.keys():
-        man_dic[dep] = {}
-        # It is necessary to mark this will be used for image manifest
-        man_dic[dep]["IMAGE_MANIFEST"] = True
-        man_dic[dep]["PN"] = dep
-        man_dic[dep]["FILES"] = \
-            " ".join(get_deployed_files(dep_dic[dep]))
-        with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
-            for line in f.readlines():
-                key,val = line.split(": ", 1)
-                man_dic[dep][key] = val[:-1]
-
-    lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
-                                    d.getVar('IMAGE_NAME'))
-    bb.utils.mkdirhier(lic_manifest_dir)
-    image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
-    write_license_files(d, image_license_manifest, man_dic)
-
-def get_deployed_dependencies(d):
-    """
-    Get all the deployed dependencies of an image
-    """
-
-    deploy = {}
-    # Get all the dependencies for the current task (rootfs).
-    # Also get EXTRA_IMAGEDEPENDS because the bootloader is
-    # usually in this var and not listed in rootfs.
-    # At last, get the dependencies from boot classes because
-    # it might contain the bootloader.
-    taskdata = d.getVar("BB_TASKDEPDATA", False)
-    depends = list(set([dep[0] for dep
-                    in list(taskdata.values())
-                    if not dep[0].endswith("-native")]))
-    extra_depends = d.getVar("EXTRA_IMAGEDEPENDS")
-    boot_depends = get_boot_dependencies(d)
-    depends.extend(extra_depends.split())
-    depends.extend(boot_depends)
-    depends = list(set(depends))
-
-    # To verify what was deployed it checks the rootfs dependencies against
-    # the SSTATE_MANIFESTS for "deploy" task.
-    # The manifest file name contains the arch. Because we are not running
-    # in the recipe context it is necessary to check every arch used.
-    sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
-    archs = list(set(d.getVar("SSTATE_ARCHS").split()))
-    for dep in depends:
-        # Some recipes have an arch on their own, so we try that first.
-        special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep)
-        if special_arch:
-            sstate_manifest_file = os.path.join(sstate_manifest_dir,
-                    "manifest-%s-%s.deploy" % (special_arch, dep))
-            if os.path.exists(sstate_manifest_file):
-                deploy[dep] = sstate_manifest_file
-                continue
-
-        for arch in archs:
-            sstate_manifest_file = os.path.join(sstate_manifest_dir,
-                    "manifest-%s-%s.deploy" % (arch, dep))
-            if os.path.exists(sstate_manifest_file):
-                deploy[dep] = sstate_manifest_file
-                break
-
-    return deploy
-get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
-
-def get_boot_dependencies(d):
-    """
-    Return the dependencies from boot tasks
-    """
-
-    depends = []
-    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
-    # Only bootimg includes the depends flag
-    boot_depends_string = d.getVarFlag("do_bootimg", "depends") or ""
-    boot_depends = [dep.split(":")[0] for dep
-                in boot_depends_string.split()
-                if not dep.split(":")[0].endswith("-native")]
-    for dep in boot_depends:
-        info_file = os.path.join(d.getVar("LICENSE_DIRECTORY"),
-                dep, "recipeinfo")
-        # If the recipe and dependency name is the same
-        if os.path.exists(info_file):
-            depends.append(dep)
-        # We need to search for the provider of the dependency
-        else:
-            for taskdep in taskdepdata.values():
-                # The fifth field contains what the task provides
-                if dep in taskdep[4]:
-                    info_file = os.path.join(
-                            d.getVar("LICENSE_DIRECTORY"),
-                            taskdep[0], "recipeinfo")
-                    if os.path.exists(info_file):
-                        depends.append(taskdep[0])
-                        break
-    return depends
-get_boot_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
-
-def get_deployed_files(man_file):
-    """
-    Get the files deployed from the sstate manifest
-    """
-
-    dep_files = []
-    excluded_files = []
-    with open(man_file, "r") as manifest:
-        all_files = manifest.read()
-    for f in all_files.splitlines():
-        if ((not (os.path.islink(f) or os.path.isdir(f))) and
-                not os.path.basename(f) in excluded_files):
-            dep_files.append(os.path.basename(f))
-    return dep_files
-
 python do_populate_lic() {
     """
     Populate LICENSE_DIRECTORY with licenses.
@@ -340,6 +62,9 @@
         # first in PACKAGES to be sure that nothing else gets LICENSE_FILES_DIRECTORY
         d.setVar('PACKAGES', "%s %s" % (pn_lic, packages))
         d.setVar('FILES_' + pn_lic, files)
+    for pn in packages.split():
+        if pn == pn_lic:
+            continue
         rrecommends_pn = d.getVar('RRECOMMENDS_' + pn)
         if rrecommends_pn:
             d.setVar('RRECOMMENDS_' + pn, "%s %s" % (pn_lic, rrecommends_pn))
@@ -674,11 +399,7 @@
 do_populate_lic[sstate-inputdirs] = "${LICSSTATEDIR}"
 do_populate_lic[sstate-outputdirs] = "${LICENSE_DIRECTORY}/"
 
-ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
-do_rootfs[recrdeptask] += "do_populate_lic"
-
-IMAGE_POSTPROCESS_COMMAND_prepend = "write_deploy_manifest; "
-do_image[recrdeptask] += "do_populate_lic"
+IMAGE_CLASSES_append = " license_image"
 
 python do_populate_lic_setscene () {
     sstate_setscene(d)
diff --git a/poky/meta/classes/license_image.bbclass b/poky/meta/classes/license_image.bbclass
new file mode 100644
index 0000000..f0fbb76
--- /dev/null
+++ b/poky/meta/classes/license_image.bbclass
@@ -0,0 +1,238 @@
+python write_package_manifest() {
+    # Get list of installed packages
+    license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
+    bb.utils.mkdirhier(license_image_dir)
+    from oe.rootfs import image_list_installed_packages
+    from oe.utils import format_pkg_list
+
+    pkgs = image_list_installed_packages(d)
+    output = format_pkg_list(pkgs)
+    open(os.path.join(license_image_dir, 'package.manifest'),
+        'w+').write(output)
+}
+
+python license_create_manifest() {
+    import oe.packagedata
+    from oe.rootfs import image_list_installed_packages
+
+    build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS')
+    if build_images_from_feeds == "1":
+        return 0
+
+    pkg_dic = {}
+    for pkg in sorted(image_list_installed_packages(d)):
+        pkg_info = os.path.join(d.getVar('PKGDATA_DIR'),
+                                'runtime-reverse', pkg)
+        pkg_name = os.path.basename(os.readlink(pkg_info))
+
+        pkg_dic[pkg_name] = oe.packagedata.read_pkgdatafile(pkg_info)
+        if not "LICENSE" in pkg_dic[pkg_name].keys():
+            pkg_lic_name = "LICENSE_" + pkg_name
+            pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
+
+    rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+                        d.getVar('IMAGE_NAME'), 'license.manifest')
+    write_license_files(d, rootfs_license_manifest, pkg_dic)
+}
+
+def write_license_files(d, license_manifest, pkg_dic):
+    import re
+
+    bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE") or "").split()
+    bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
+    bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+
+    with open(license_manifest, "w") as license_file:
+        for pkg in sorted(pkg_dic):
+            if bad_licenses:
+                try:
+                    (pkg_dic[pkg]["LICENSE"], pkg_dic[pkg]["LICENSES"]) = \
+                        oe.license.manifest_licenses(pkg_dic[pkg]["LICENSE"],
+                        bad_licenses, canonical_license, d)
+                except oe.license.LicenseError as exc:
+                    bb.fatal('%s: %s' % (d.getVar('P'), exc))
+            else:
+                pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
+                pkg_dic[pkg]["LICENSES"] = re.sub('  *', ' ', pkg_dic[pkg]["LICENSES"])
+                pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
+
+            if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
+                # Rootfs manifest
+                license_file.write("PACKAGE NAME: %s\n" % pkg)
+                license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
+                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+                license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
+
+                # If the package doesn't contain any file, that is, its size is 0, the license
+                # isn't relevant as far as the final image is concerned. So doing license check
+                # doesn't make much sense, skip it.
+                if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+                    continue
+            else:
+                # Image manifest
+                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+                license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
+                license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
+                license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
+
+            for lic in pkg_dic[pkg]["LICENSES"]:
+                lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+                                        pkg_dic[pkg]["PN"], "generic_%s" % 
+                                        re.sub('\+', '', lic))
+                # explicitly skip the CLOSED license, since it has no generic license file
+                if lic == "CLOSED":
+                   continue
+
+                if not os.path.exists(lic_file):
+                   bb.warn("The license listed %s was not in the "
+                            "licenses collected for recipe %s"
+                            % (lic, pkg_dic[pkg]["PN"]))
+
+    # Two options here:
+    # - Just copy the manifest
+    # - Copy the manifest and the license directories
+    # With both options set we see a .5 M increase in core-image-minimal
+    copy_lic_manifest = d.getVar('COPY_LIC_MANIFEST')
+    copy_lic_dirs = d.getVar('COPY_LIC_DIRS')
+    if copy_lic_manifest == "1":
+        rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS'), 
+                                'usr', 'share', 'common-licenses')
+        bb.utils.mkdirhier(rootfs_license_dir)
+        rootfs_license_manifest = os.path.join(rootfs_license_dir,
+                os.path.split(license_manifest)[1])
+        if not os.path.exists(rootfs_license_manifest):
+            os.link(license_manifest, rootfs_license_manifest)
+
+        if copy_lic_dirs == "1":
+            for pkg in sorted(pkg_dic):
+                pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
+                bb.utils.mkdirhier(pkg_rootfs_license_dir)
+                pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+                                            pkg_dic[pkg]["PN"]) 
+
+                pkg_manifest_licenses = [canonical_license(d, lic) \
+                        for lic in pkg_dic[pkg]["LICENSES"]]
+
+                licenses = os.listdir(pkg_license_dir)
+                for lic in licenses:
+                    rootfs_license = os.path.join(rootfs_license_dir, lic)
+                    pkg_license = os.path.join(pkg_license_dir, lic)
+                    pkg_rootfs_license = os.path.join(pkg_rootfs_license_dir, lic)
+
+                    if re.match("^generic_.*$", lic):
+                        generic_lic = canonical_license(d,
+                                re.search("^generic_(.*)$", lic).group(1))
+
+                        # Do not copy a generic license into the package if it
+                        # isn't declared in the package's LICENSES.
+                        if re.sub('\+$', '', generic_lic) not in \
+                                [re.sub('\+', '', lic) for lic in \
+                                 pkg_manifest_licenses]:
+                            continue
+
+                        if not oe.license.license_ok(generic_lic,
+                                bad_licenses):
+                            continue
+
+                        if not os.path.exists(rootfs_license):
+                            os.link(pkg_license, rootfs_license)
+
+                        if not os.path.exists(pkg_rootfs_license):
+                            os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+                    else:
+                        if (not oe.license.license_ok(canonical_license(d, lic),
+                                bad_licenses) or
+                                os.path.exists(pkg_rootfs_license)):
+                            continue
+
+                        os.link(pkg_license, pkg_rootfs_license)
+
+
+def license_deployed_manifest(d):
+    """
+    Write the license manifest for the deployed recipes.
+    The deployed recipes usually include the bootloader
+    and extra files needed to boot the target.
+    """
+
+    dep_dic = {}
+    man_dic = {}
+    lic_dir = d.getVar("LICENSE_DIRECTORY")
+
+    dep_dic = get_deployed_dependencies(d)
+    for dep in dep_dic.keys():
+        man_dic[dep] = {}
+        # Mark this entry as one that will be used for the image manifest
+        man_dic[dep]["IMAGE_MANIFEST"] = True
+        man_dic[dep]["PN"] = dep
+        man_dic[dep]["FILES"] = \
+            " ".join(get_deployed_files(dep_dic[dep]))
+        with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
+            for line in f.readlines():
+                key,val = line.split(": ", 1)
+                man_dic[dep][key] = val[:-1]
+
+    lic_manifest_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'),
+                                    d.getVar('IMAGE_NAME'))
+    bb.utils.mkdirhier(lic_manifest_dir)
+    image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest')
+    write_license_files(d, image_license_manifest, man_dic)
+
+def get_deployed_dependencies(d):
+    """
+    Get all the deployed dependencies of an image
+    """
+
+    deploy = {}
+    # Get all the dependencies for the current task (rootfs).
+    # Also get EXTRA_IMAGEDEPENDS because the bootloader is
+    # usually in this var and not listed in rootfs.
+    # Finally, get the dependencies from boot classes because
+    # they might contain the bootloader.
+    taskdata = d.getVar("BB_TASKDEPDATA", False)
+    depends = list(set([dep[0] for dep
+                    in list(taskdata.values())
+                    if not dep[0].endswith("-native")]))
+
+    # To verify what was deployed, check the rootfs dependencies against
+    # the SSTATE_MANIFESTS for the "deploy" task.
+    # The manifest file name contains the arch. Because we are not running
+    # in the recipe context, it is necessary to check every arch used.
+    sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS")
+    archs = list(set(d.getVar("SSTATE_ARCHS").split()))
+    for dep in depends:
+        for arch in archs:
+            sstate_manifest_file = os.path.join(sstate_manifest_dir,
+                    "manifest-%s-%s.deploy" % (arch, dep))
+            if os.path.exists(sstate_manifest_file):
+                deploy[dep] = sstate_manifest_file
+                break
+
+    return deploy
+get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+
+def get_deployed_files(man_file):
+    """
+    Get the files deployed from the sstate manifest
+    """
+
+    dep_files = []
+    excluded_files = []
+    with open(man_file, "r") as manifest:
+        all_files = manifest.read()
+    for f in all_files.splitlines():
+        if ((not (os.path.islink(f) or os.path.isdir(f))) and
+                os.path.basename(f) not in excluded_files):
+            dep_files.append(os.path.basename(f))
+    return dep_files
+
+ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
+do_rootfs[recrdeptask] += "do_populate_lic"
+
+python do_populate_lic_deploy() {
+    license_deployed_manifest(d)
+}
+
+addtask populate_lic_deploy before do_build after do_image_complete
+do_populate_lic_deploy[recrdeptask] += "do_populate_lic do_deploy"
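
For context, the rootfs license handling above is opt-in via two variables,
typically set in local.conf; a minimal sketch (values illustrative, not part
of this change):

    COPY_LIC_MANIFEST = "1"
    COPY_LIC_DIRS = "1"

With the first set, the license manifest is hardlinked into
usr/share/common-licenses in the image; with both set, each package's
collected license texts are copied in as well, at a cost of roughly 0.5 MB
in core-image-minimal.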
+
diff --git a/poky/meta/classes/linuxloader.bbclass b/poky/meta/classes/linuxloader.bbclass
index 8f30eb3..b4c4134 100644
--- a/poky/meta/classes/linuxloader.bbclass
+++ b/poky/meta/classes/linuxloader.bbclass
@@ -1,70 +1,61 @@
-LDSO_TCLIBC = "glibc"
-LDSO_TCLIBC_libc-musl = "musl"
-LDSO_TCLIBC_libc-baremetal = "musl"
+def get_musl_loader(d):
+    import re
+    dynamic_loader = None
 
-linuxloader_glibc () {
-	case ${TARGET_ARCH} in
-		powerpc | microblaze )
-			dynamic_loader="${base_libdir}/ld.so.1"
-			;;
-		mipsisa32r6el | mipsisa32r6 | mipsisa64r6el | mipsisa64r6)
-			dynamic_loader="${base_libdir}/ld-linux-mipsn8.so.1"
-			;;
-		mips* )
-			dynamic_loader="${base_libdir}/ld.so.1"
-			;;
-		powerpc64)
-			dynamic_loader="${base_libdir}/ld64.so.1"
-			;;
-		x86_64)
-			dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
-			;;
-		i*86 )
-			dynamic_loader="${base_libdir}/ld-linux.so.2"
-			;;
-		arm )
-			dynamic_loader="${base_libdir}/ld-linux.so.3"
-			;;
-		* )
-			dynamic_loader="/unknown_dynamic_linker"
-			;;
-	esac
-	echo $dynamic_loader
-}
+    targetarch = d.getVar("TARGET_ARCH")
+    if targetarch.startswith("microblaze"):
+        dynamic_loader = "${base_libdir}/ld-musl-microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}.so.1"
+    elif targetarch.startswith("mips"):
+        dynamic_loader = "${base_libdir}/ld-musl-mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+    elif targetarch == "powerpc":
+        dynamic_loader = "${base_libdir}/ld-musl-powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
+    elif targetarch == "powerpc64":
+        dynamic_loader = "${base_libdir}/ld-musl-powerpc64.so.1"
+    elif targetarch == "x86_64":
+        dynamic_loader = "${base_libdir}/ld-musl-x86_64.so.1"
+    elif re.search("i.86", targetarch):
+        dynamic_loader = "${base_libdir}/ld-musl-i386.so.1"
+    elif targetarch.startswith("arm"):
+        dynamic_loader = "${base_libdir}/ld-musl-arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}.so.1"
+    elif targetarch.startswith("aarch64"):
+        dynamic_loader = "${base_libdir}/ld-musl-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+    return dynamic_loader
 
-linuxloader_musl () {
-	case ${TARGET_ARCH} in
-		microblaze* )
-			dynamic_loader="${base_libdir}/ld-musl-microblaze${@bb.utils.contains('TUNE_FEATURES', 'bigendian', '', 'el' ,d)}.so.1"
-			;;
-		mips* )
-			dynamic_loader="${base_libdir}/ld-musl-mips${ABIEXTENSION}${MIPSPKGSFX_BYTE}${MIPSPKGSFX_R6}${MIPSPKGSFX_ENDIAN}${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
-			;;
-		powerpc )
-			dynamic_loader="${base_libdir}/ld-musl-powerpc${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}.so.1"
-			;;
-		powerpc64 )
-			dynamic_loader="${base_libdir}/ld-musl-powerpc64.so.1"
-			;;
-		x86_64 )
-			dynamic_loader="${base_libdir}/ld-musl-x86_64.so.1"
-			;;
-		i*86 )
-			dynamic_loader="${base_libdir}/ld-musl-i386.so.1"
-			;;
-		arm* )
-			dynamic_loader="${base_libdir}/ld-musl-arm${ARMPKGSFX_ENDIAN}${ARMPKGSFX_EABI}.so.1"
-			;;
-		aarch64* )
-			dynamic_loader="${base_libdir}/ld-musl-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
-			;;
-		* )
-			dynamic_loader="/unknown_dynamic_linker"
-			;;
-	esac
-	echo $dynamic_loader
-}
+def get_glibc_loader(d):
+    import re
 
-linuxloader () {
-	linuxloader_${LDSO_TCLIBC}
-}
+    dynamic_loader = None
+    targetarch = d.getVar("TARGET_ARCH")
+    if targetarch in ["powerpc", "microblaze"]:
+        dynamic_loader = "${base_libdir}/ld.so.1"
+    elif targetarch in ["mipsisa32r6el", "mipsisa32r6", "mipsisa64r6el", "mipsisa64r6"]:
+        dynamic_loader = "${base_libdir}/ld-linux-mipsn8.so.1"
+    elif targetarch.startswith("mips"):
+        dynamic_loader = "${base_libdir}/ld.so.1"
+    elif targetarch == "powerpc64":
+        dynamic_loader = "${base_libdir}/ld64.so.1"
+    elif targetarch == "x86_64":
+        dynamic_loader = "${base_libdir}/ld-linux-x86-64.so.2"
+    elif re.search("i.86", targetarch):
+        dynamic_loader = "${base_libdir}/ld-linux.so.2"
+    elif targetarch == "arm":
+        dynamic_loader = "${base_libdir}/ld-linux.so.3"
+    elif targetarch.startswith("aarch64"):
+        dynamic_loader = "${base_libdir}/ld-linux-aarch64${ARMPKGSFX_ENDIAN_64}.so.1"
+    return dynamic_loader
+
+def get_linuxloader(d):
+    overrides = d.getVar("OVERRIDES").split(":")
+
+    if "libc-baremetal" in overrides:
+        return None
+
+    if "libc-musl" in overrides:
+        dynamic_loader = get_musl_loader(d)
+    else:
+        dynamic_loader = get_glibc_loader(d)
+    return dynamic_loader
+
+get_linuxloader[vardepvalue] = "${@get_linuxloader(d)}"
+get_musl_loader[vardepvalue] = "${@get_musl_loader(d)}"
+get_glibc_loader[vardepvalue] = "${@get_glibc_loader(d)}"
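
The shell-function loader lookup becomes plain Python so recipes can expand
the result inline; a hypothetical fragment (usage assumed for illustration):

    # Expands to e.g. /lib/ld-musl-x86_64.so.1 on a musl x86-64 target;
    # get_linuxloader() consults OVERRIDES, so the same line resolves to the
    # glibc loader on glibc targets, and the function returns None for
    # baremetal.
    EXTRA_OEMAKE += "'LDSO=${@get_linuxloader(d)}'"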
diff --git a/poky/meta/classes/manpages.bbclass b/poky/meta/classes/manpages.bbclass
index d16237b..50c2547 100644
--- a/poky/meta/classes/manpages.bbclass
+++ b/poky/meta/classes/manpages.bbclass
@@ -3,3 +3,35 @@
 # tends to pull in the entire XML stack and other tools, so it's not enabled
 # by default.
 PACKAGECONFIG_append_class-target = " ${@bb.utils.contains('DISTRO_FEATURES', 'api-documentation', 'manpages', '', d)}"
+
+inherit qemu
+
+# Manual files are usually packaged in ${PN}-doc, except for man-pages
+MAN_PKG ?= "${PN}-doc"
+
+# only add man-db to RDEPENDS when manual files are built and installed
+RDEPENDS_${MAN_PKG} += "${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'man-db', '', d)}"
+
+pkg_postinst_append_${MAN_PKG} () {
+	# only update manual page index caches when manual files are built and installed
+	if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
+		if test -n "$D"; then
+			if ${@bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'true','false', d)}; then
+				sed "s:\(\s\)/:\1$D/:g" $D${sysconfdir}/man_db.conf | ${@qemu_run_binary(d, '$D', '${bindir}/mandb')} -C - -u -q $D${mandir}
+				mkdir -p $D${localstatedir}/cache/man
+				mv $D${mandir}/index.db $D${localstatedir}/cache/man
+			else
+				$INTERCEPT_DIR/postinst_intercept delay_to_first_boot ${PKG} mlprefix=${MLPREFIX}
+			fi
+		else
+			mandb -q
+		fi
+	fi
+}
+
+pkg_postrm_append_${MAN_PKG} () {
+	# only update manual page index caches when manual files are built and installed
+	if ${@bb.utils.contains('PACKAGECONFIG', 'manpages', 'true', 'false', d)}; then
+		mandb -q
+	fi
+}
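
Recipes opt in through the manpages PACKAGECONFIG; a hedged sketch of a
consuming recipe (the configure flags and dependency are assumptions):

    inherit manpages
    PACKAGECONFIG ??= ""
    PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native"

When MACHINE_FEATURES contains qemu-usermode, the postinst above builds the
man-db index at rootfs time under QEMU; otherwise indexing is deferred to
first boot, and on-target installs simply run mandb -q.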
diff --git a/poky/meta/classes/meson.bbclass b/poky/meta/classes/meson.bbclass
index 2d7ee4f..3cbdcf1 100644
--- a/poky/meta/classes/meson.bbclass
+++ b/poky/meta/classes/meson.bbclass
@@ -1,4 +1,4 @@
-inherit python3native
+inherit siteinfo python3native
 
 DEPENDS_append = " meson-native ninja-native"
 
@@ -24,25 +24,52 @@
               --infodir ${@noprefix('infodir', d)} \
               --sysconfdir ${sysconfdir} \
               --localstatedir ${localstatedir} \
-              --sharedstatedir ${sharedstatedir}"
+              --sharedstatedir ${sharedstatedir} \
+              -Dc_args='${BUILD_CPPFLAGS} ${BUILD_CFLAGS}' \
+              -Dc_link_args='${BUILD_LDFLAGS}' \
+              -Dcpp_args='${BUILD_CPPFLAGS} ${BUILD_CXXFLAGS}' \
+              -Dcpp_link_args='${BUILD_LDFLAGS}'"
 
 MESON_TOOLCHAIN_ARGS = "${HOST_CC_ARCH}${TOOLCHAIN_OPTIONS}"
 MESON_C_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CFLAGS}"
 MESON_CPP_ARGS = "${MESON_TOOLCHAIN_ARGS} ${CXXFLAGS}"
 MESON_LINK_ARGS = "${MESON_TOOLCHAIN_ARGS} ${LDFLAGS}"
 
-# both are required but not used by meson
-MESON_HOST_ENDIAN = "bogus-endian"
-MESON_TARGET_ENDIAN = "bogus-endian"
-
-EXTRA_OEMESON += "${PACKAGECONFIG_CONFARGS}"
+EXTRA_OEMESON_append = " ${PACKAGECONFIG_CONFARGS}"
 
 MESON_CROSS_FILE = ""
 MESON_CROSS_FILE_class-target = "--cross-file ${WORKDIR}/meson.cross"
 MESON_CROSS_FILE_class-nativesdk = "--cross-file ${WORKDIR}/meson.cross"
 
 def meson_array(var, d):
-    return "', '".join(d.getVar(var).split()).join(("'", "'"))
+    items = d.getVar(var).split()
+    return repr(items[0] if len(items) == 1 else items)
+
+# Map our ARCH values to what Meson expects:
+# http://mesonbuild.com/Reference-tables.html#cpu-families
+def meson_cpu_family(var, d):
+    import re
+    arch = d.getVar(var)
+    if arch == 'powerpc':
+        return 'ppc'
+    elif arch == 'powerpc64':
+        return 'ppc64'
+    elif arch == 'mipsel':
+        return 'mips'
+    elif re.match(r"i[3-6]86", arch):
+        return "x86"
+    else:
+        return arch
+
+def meson_endian(prefix, d):
+    arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS")
+    sitedata = siteinfo_data_for_machine(arch, os, d)
+    if "endian-little" in sitedata:
+        return "little"
+    elif "endian-big" in sitedata:
+        return "big"
+    else:
+        bb.fatal("Cannot determine endianism for %s-%s" % (arch, os))
 
 addtask write_config before do_configure
 do_write_config[vardeps] += "MESON_C_ARGS MESON_CPP_ARGS MESON_LINK_ARGS CC CXX LD AR NM STRIP READELF"
@@ -50,68 +77,69 @@
     # This needs to be Py to split the args into single-element lists
     cat >${WORKDIR}/meson.cross <<EOF
 [binaries]
-c = [${@meson_array('CC', d)}]
-cpp = [${@meson_array('CXX', d)}]
-ar = [${@meson_array('AR', d)}]
-nm = [${@meson_array('NM', d)}]
-ld = [${@meson_array('LD', d)}]
-strip = [${@meson_array('STRIP', d)}]
-readelf = [${@meson_array('READELF', d)}]
+c = ${@meson_array('CC', d)}
+cpp = ${@meson_array('CXX', d)}
+ar = ${@meson_array('AR', d)}
+nm = ${@meson_array('NM', d)}
+ld = ${@meson_array('LD', d)}
+strip = ${@meson_array('STRIP', d)}
+readelf = ${@meson_array('READELF', d)}
 pkgconfig = 'pkg-config'
 
 [properties]
 needs_exe_wrapper = true
-c_args = [${@meson_array('MESON_C_ARGS', d)}]
-c_link_args = [${@meson_array('MESON_LINK_ARGS', d)}]
-cpp_args = [${@meson_array('MESON_CPP_ARGS', d)}]
-cpp_link_args = [${@meson_array('MESON_LINK_ARGS', d)}]
+c_args = ${@meson_array('MESON_C_ARGS', d)}
+c_link_args = ${@meson_array('MESON_LINK_ARGS', d)}
+cpp_args = ${@meson_array('MESON_CPP_ARGS', d)}
+cpp_link_args = ${@meson_array('MESON_LINK_ARGS', d)}
 gtkdoc_exe_wrapper = '${B}/gtkdoc-qemuwrapper'
 
 [host_machine]
 system = '${HOST_OS}'
-cpu_family = '${HOST_ARCH}'
+cpu_family = '${@meson_cpu_family('HOST_ARCH', d)}'
 cpu = '${HOST_ARCH}'
-endian = '${MESON_HOST_ENDIAN}'
+endian = '${@meson_endian('HOST', d)}'
 
 [target_machine]
 system = '${TARGET_OS}'
-cpu_family = '${TARGET_ARCH}'
+cpu_family = '${@meson_cpu_family('TARGET_ARCH', d)}'
 cpu = '${TARGET_ARCH}'
-endian = '${MESON_TARGET_ENDIAN}'
+endian = '${@meson_endian('TARGET', d)}'
 EOF
 }
 
 CONFIGURE_FILES = "meson.build"
 
 meson_do_configure() {
+    # Work around "Meson fails if /tmp is mounted with noexec #2972"
+    mkdir -p "${B}/meson-private/tmp"
+    export TMPDIR="${B}/meson-private/tmp"
+    bbnote Executing meson ${EXTRA_OEMESON}...
     if ! meson ${MESONOPTS} "${MESON_SOURCEPATH}" "${B}" ${MESON_CROSS_FILE} ${EXTRA_OEMESON}; then
-        cat ${B}/meson-logs/meson-log.txt
         bbfatal_log meson failed
     fi
 }
 
-meson_do_configure_prepend_class-target() {
+override_native_tools() {
     # Set these so that meson uses the native tools for its build sanity tests,
     # which require executables to be runnable. The cross file will still
-    # override these for the target build. Note that we do *not* set CFLAGS,
-    # LDFLAGS, etc. as they will be slurped in by meson and applied to the
-    # target build, causing errors.
+    # override these for the target build.
     export CC="${BUILD_CC}"
     export CXX="${BUILD_CXX}"
     export LD="${BUILD_LD}"
     export AR="${BUILD_AR}"
+    # These contain *target* flags but will be used as *native* flags.  The
+    # correct native flags will be passed via -Dc_args and so on, unset them so
+    # they don't interfere with tools invoked by Meson (such as g-ir-scanner)
+    unset CPPFLAGS CFLAGS CXXFLAGS LDFLAGS
+}
+
+meson_do_configure_prepend_class-target() {
+    override_native_tools
 }
 
 meson_do_configure_prepend_class-nativesdk() {
-    # Set these so that meson uses the native tools for its build sanity tests,
-    # which require executables to be runnable. The cross file will still
-    # override these for the nativesdk build. Note that we do *not* set CFLAGS,
-    # LDFLAGS, etc. as they will be slurped in by meson and applied to the
-    # nativesdk build, causing errors.
-    export CC="${BUILD_CC}"
-    export CXX="${BUILD_CXX}"
-    export LD="${BUILD_LD}"
-    export AR="${BUILD_AR}"
+    override_native_tools
 }
 
 meson_do_configure_prepend_class-native() {
@@ -120,11 +148,11 @@
 
 do_compile[progress] = "outof:^\[(\d+)/(\d+)\]\s+"
 meson_do_compile() {
-    ninja ${PARALLEL_MAKE}
+    ninja -v ${PARALLEL_MAKE}
 }
 
 meson_do_install() {
-    DESTDIR='${D}' ninja ${PARALLEL_MAKEINST} install
+    DESTDIR='${D}' ninja -v ${PARALLEL_MAKEINST} install
 }
 
 EXPORT_FUNCTIONS do_configure do_compile do_install
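
Two of the changes above deserve a worked example. meson_array() now emits a
Python repr that Meson parses natively, so multi-word tool definitions need
no hand quoting (values assumed):

    # CC = "aarch64-poky-linux-gcc --sysroot=/path/to/sysroot"
    # meson_array('CC', d) -> "['aarch64-poky-linux-gcc', '--sysroot=/path/to/sysroot']"
    # CC = "gcc"
    # meson_array('CC', d) -> "'gcc'"

And the bogus-endian placeholders are gone: meson_endian() derives "little"
or "big" from siteinfo data for the host and target machines, which is why
the class now inherits siteinfo.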
diff --git a/poky/meta/classes/mime.bbclass b/poky/meta/classes/mime.bbclass
index 0df1583..6c7b868 100644
--- a/poky/meta/classes/mime.bbclass
+++ b/poky/meta/classes/mime.bbclass
@@ -53,5 +53,5 @@
             postrm += d.getVar('mime_postrm')
             d.setVar('pkg_postrm_%s' % pkg, postrm)
             bb.note("adding shared-mime-info-data dependency to %s" % pkg)
-            d.appendVar('RDEPENDS_' + pkg, " shared-mime-info-data")
+            d.appendVar('RDEPENDS_' + pkg, " " + d.getVar('MLPREFIX')+"shared-mime-info-data")
 }
diff --git a/poky/meta/classes/multilib.bbclass b/poky/meta/classes/multilib.bbclass
index 519c1a5..6c6499a 100644
--- a/poky/meta/classes/multilib.bbclass
+++ b/poky/meta/classes/multilib.bbclass
@@ -11,8 +11,12 @@
     # There should only be one kernel in multilib configs
     # We also skip multilib setup for module packages.
     provides = (e.data.getVar("PROVIDES") or "").split()
-    if "virtual/kernel" in provides or bb.data.inherits_class('module-base', e.data) or "make-mod-scripts" in e.data.getVar("PN"):
-        raise bb.parse.SkipRecipe("We shouldn't have multilib variants for the kernel")
+    non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
+    bpn = e.data.getVar("BPN")
+    if "virtual/kernel" in provides or \
+            bb.data.inherits_class('module-base', e.data) or \
+            bpn in non_ml_recipes:
+        raise bb.parse.SkipRecipe("We shouldn't have multilib variants for %s" % bpn)
 
     save_var_name=e.data.getVar("MULTILIB_SAVE_VARNAME") or ""
     for name in save_var_name.split():
@@ -35,6 +39,10 @@
         return
 
     if bb.data.inherits_class('cross-canadian', e.data):
+        # Multilib cross-canadian should use the same nativesdk sysroot without MLPREFIX
+        e.data.setVar("RECIPE_SYSROOT", "${WORKDIR}/recipe-sysroot")
+        e.data.setVar("STAGING_DIR_TARGET", "${WORKDIR}/recipe-sysroot")
+        e.data.setVar("STAGING_DIR_HOST", "${WORKDIR}/recipe-sysroot")
         e.data.setVar("MLPREFIX", variant + "-")
         override = ":virtclass-multilib-" + variant
         e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
@@ -46,10 +54,10 @@
     if bb.data.inherits_class('nativesdk', e.data) or bb.data.inherits_class('crosssdk', e.data):
         raise bb.parse.SkipRecipe("We can't extend nativesdk recipes")
 
-    if bb.data.inherits_class('allarch', e.data) and not bb.data.inherits_class('packagegroup', e.data):
+    if bb.data.inherits_class('allarch', e.data) and not d.getVar('MULTILIB_VARIANTS') \
+        and not bb.data.inherits_class('packagegroup', e.data):
         raise bb.parse.SkipRecipe("Don't extend allarch recipes which are not packagegroups")
 
-
     # Expand this since this won't work correctly once we set a multilib into place
     e.data.setVar("ALL_MULTILIB_PACKAGE_ARCHS", e.data.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
  
@@ -65,12 +73,11 @@
     e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
     e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
 
-    # Expand the WHITELISTs with multilib prefix
-    for whitelist in ["WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
-        pkgs = e.data.getVar(whitelist)
-        for pkg in pkgs.split():
-            pkgs += " " + variant + "-" + pkg
-        e.data.setVar(whitelist, pkgs)
+    # Expand WHITELIST_GPL-3.0 with multilib prefix
+    pkgs = e.data.getVar("WHITELIST_GPL-3.0")
+    for pkg in pkgs.split():
+        pkgs += " " + variant + "-" + pkg
+    e.data.setVar("WHITELIST_GPL-3.0", pkgs)
 
     # DEFAULTTUNE can change TARGET_ARCH override so expand this now before update_data
     newtune = e.data.getVar("DEFAULTTUNE_" + "virtclass-multilib-" + variant, False)
@@ -117,6 +124,7 @@
     clsextend.map_variable("INITSCRIPT_PACKAGES")
     clsextend.map_variable("USERADD_PACKAGES")
     clsextend.map_variable("SYSTEMD_PACKAGES")
+    clsextend.map_variable("UPDATERCPN")
 }
 
 PACKAGEFUNCS_append = " do_package_qa_multilib"
@@ -131,7 +139,8 @@
                 i = i[len('virtual/'):]
             if (not i.startswith('kernel-module')) and (not i.startswith(mlprefix)) and \
                 (not 'cross-canadian' in i) and (not i.startswith("nativesdk-")) and \
-                (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')):
+                (not i.startswith("rtld")) and (not i.startswith('kernel-vmlinux')) \
+                and (not i.startswith("kernel-image")):
                 candidates.append(i)
         if len(candidates) > 0:
             msg = "%s package %s - suspicious values '%s' in %s" \
@@ -142,6 +151,10 @@
     if not ml:
         return
 
+    # exception for ${MLPREFIX}target-sdk-provides-dummy
+    if 'target-sdk-provides-dummy' in d.getVar('PN'):
+        return
+
     packages = d.getVar('PACKAGES')
     for pkg in packages.split():
         check_mlprefix(pkg, 'RDEPENDS', ml)
diff --git a/poky/meta/classes/multilib_global.bbclass b/poky/meta/classes/multilib_global.bbclass
index d2ec1ad..649cc09 100644
--- a/poky/meta/classes/multilib_global.bbclass
+++ b/poky/meta/classes/multilib_global.bbclass
@@ -164,10 +164,11 @@
     if variant:
         return
 
+    non_ml_recipes = d.getVar('NON_MULTILIB_RECIPES').split()
+
     if bb.data.inherits_class('kernel', e.data) or \
             bb.data.inherits_class('module-base', e.data) or \
-            (bb.data.inherits_class('allarch', e.data) and\
-             not bb.data.inherits_class('packagegroup', e.data)):
+            d.getVar('BPN') in non_ml_recipes:
             variants = (e.data.getVar("MULTILIB_VARIANTS") or "").split()
 
             import oe.classextend
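
Both multilib classes now consult a single NON_MULTILIB_RECIPES list instead
of hardcoded PN checks; a distro could extend it, e.g. (recipe names
illustrative):

    NON_MULTILIB_RECIPES_append = " make-mod-scripts my-bootloader"

Any recipe whose BPN appears in the list is excluded from multilib variant
generation.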
diff --git a/poky/meta/classes/multilib_script.bbclass b/poky/meta/classes/multilib_script.bbclass
new file mode 100644
index 0000000..51c9fcc
--- /dev/null
+++ b/poky/meta/classes/multilib_script.bbclass
@@ -0,0 +1,32 @@
+#
+# Recipe needs to set MULTILIB_SCRIPTS in the form <pkgname>:<scriptname>, e.g.
+# MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/file1 ${PN}:${base_bindir}/file2"
+# to indicate which script files to process from which packages.
+#
+
+inherit update-alternatives
+
+MULTILIB_SUFFIX = "${@d.getVar('base_libdir',1).split('/')[-1]}"
+
+PACKAGE_PREPROCESS_FUNCS += "multilibscript_rename"
+
+multilibscript_rename() {
+	:
+}
+
+python () {
+    # Do nothing if multilib isn't being used
+    if not d.getVar("MULTILIB_VARIANTS"):
+       return
+    # Do nothing for native/cross
+    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d):
+       return
+
+    for entry in (d.getVar("MULTILIB_SCRIPTS", False) or "").split():
+       pkg, script = entry.split(":")
+       scriptname = os.path.basename(script)
+       d.appendVar("ALTERNATIVE_" + pkg, scriptname + " ")
+       d.setVarFlag("ALTERNATIVE_LINK_NAME", scriptname, script)
+       d.setVarFlag("ALTERNATIVE_TARGET", scriptname, script + "-${MULTILIB_SUFFIX}")
+       d.appendVar("multilibscript_rename",  "\n	mv ${PKGD}" + script + " ${PKGD}" + script + "-${MULTILIB_SUFFIX}")
+}
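
A worked example of the new class, assuming a hypothetical recipe on a lib64
multilib (MULTILIB_SUFFIX is derived from base_libdir):

    MULTILIB_SCRIPTS = "${PN}:${bindir}/foo-config"
    # The python handler above then appends to multilibscript_rename:
    #   mv ${PKGD}${bindir}/foo-config ${PKGD}${bindir}/foo-config-lib64
    # and registers ${bindir}/foo-config as an update-alternatives link,
    # letting the 32-bit and 64-bit variants of the script coexist.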
diff --git a/poky/meta/classes/native.bbclass b/poky/meta/classes/native.bbclass
index a911f2a..ddccfe2 100644
--- a/poky/meta/classes/native.bbclass
+++ b/poky/meta/classes/native.bbclass
@@ -44,7 +44,6 @@
 CFLAGS = "${BUILD_CFLAGS}"
 CXXFLAGS = "${BUILD_CXXFLAGS}"
 LDFLAGS = "${BUILD_LDFLAGS}"
-LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
 
 STAGING_BINDIR = "${STAGING_BINDIR_NATIVE}"
 STAGING_BINDIR_CROSS = "${STAGING_BINDIR_NATIVE}"
@@ -78,6 +77,7 @@
 
 bindir = "${STAGING_BINDIR_NATIVE}"
 sbindir = "${STAGING_SBINDIR_NATIVE}"
+base_libdir = "${STAGING_LIBDIR_NATIVE}"
 libdir = "${STAGING_LIBDIR_NATIVE}"
 includedir = "${STAGING_INCDIR_NATIVE}"
 sysconfdir = "${STAGING_ETCDIR_NATIVE}"
@@ -89,6 +89,7 @@
 
 NATIVE_PACKAGE_PATH_SUFFIX ?= ""
 bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
+base_libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
 libdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
 libexecdir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
 
@@ -152,8 +153,6 @@
                 newdeps.append(dep)
         d.setVar(varname, " ".join(newdeps))
 
-    e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-native")
-
     map_dependencies("DEPENDS", e.data)
     for pkg in [e.data.getVar("PN"), "", "${PN}"]:
         map_dependencies("RDEPENDS", e.data, pkg)
diff --git a/poky/meta/classes/nativesdk.bbclass b/poky/meta/classes/nativesdk.bbclass
index 69fb45c..f25b0c3 100644
--- a/poky/meta/classes/nativesdk.bbclass
+++ b/poky/meta/classes/nativesdk.bbclass
@@ -12,6 +12,11 @@
 
 MULTILIBS = ""
 
+# we need consistent staging dir whether or not multilib is enabled
+STAGING_DIR_HOST = "${WORKDIR}/recipe-sysroot"
+STAGING_DIR_TARGET = "${WORKDIR}/recipe-sysroot"
+RECIPE_SYSROOT = "${WORKDIR}/recipe-sysroot"
+
 #
 # Update PACKAGE_ARCH and PACKAGE_ARCHS
 #
@@ -78,7 +83,6 @@
 
     e.data.setVar("MLPREFIX", "nativesdk-")
     e.data.setVar("PN", "nativesdk-" + e.data.getVar("PN").replace("-nativesdk", "").replace("nativesdk-", ""))
-    e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + ":virtclass-nativesdk")
 }
 
 python () {
diff --git a/poky/meta/classes/npm.bbclass b/poky/meta/classes/npm.bbclass
index c351ff0..6dbae6b 100644
--- a/poky/meta/classes/npm.bbclass
+++ b/poky/meta/classes/npm.bbclass
@@ -10,7 +10,7 @@
 
 NPMPN ?= "${@node_pkgname(d)}"
 
-NPM_INSTALLDIR = "${D}${libdir}/node_modules/${NPMPN}"
+NPM_INSTALLDIR = "${libdir}/node/${NPMPN}"
 
 # function maps arch names to npm arch names
 def npm_oe_arch_map(target_arch, d):
@@ -22,7 +22,7 @@
     return target_arch
 
 NPM_ARCH ?= "${@npm_oe_arch_map(d.getVar('TARGET_ARCH'), d)}"
-NPM_INSTALL_DEV = "0"
+NPM_INSTALL_DEV ?= "0"
 
 npm_do_compile() {
 	# Copy in any additionally fetched modules
@@ -52,9 +52,10 @@
 	# changing the home directory to the working directory, the .npmrc will
 	# be created in this directory
 	export HOME=${WORKDIR}
-	mkdir -p ${NPM_INSTALLDIR}/
+	mkdir -p ${D}${libdir}/node_modules
 	npm pack .
 	npm install --prefix ${D}${prefix} -g --arch=${NPM_ARCH} --target_arch=${NPM_ARCH} --production --no-registry ${NPMPN}-${PV}.tgz
+	mv ${D}${libdir}/node_modules ${D}${libdir}/node
 	if [ -d ${D}${prefix}/etc ] ; then
 		# This will be empty
 		rmdir ${D}${prefix}/etc
@@ -62,13 +63,13 @@
 }
 
 python populate_packages_prepend () {
-    instdir = d.expand('${D}${libdir}/node_modules/${NPMPN}')
+    instdir = d.expand('${D}${NPM_INSTALLDIR}')
     extrapackages = oe.package.npm_split_package_dirs(instdir)
     pkgnames = extrapackages.keys()
     d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
     for pkgname in pkgnames:
         pkgrelpath, pdata = extrapackages[pkgname]
-        pkgpath = '${libdir}/node_modules/${NPMPN}/' + pkgrelpath
+        pkgpath = '${NPM_INSTALLDIR}/' + pkgrelpath
         # package names can't have underscores but npm packages sometimes use them
         oe_pkg_name = pkgname.replace('_', '-')
         expanded_pkgname = d.expand(oe_pkg_name)
@@ -84,7 +85,7 @@
 }
 
 FILES_${PN} += " \
-    ${libdir}/node_modules/${NPMPN} \
+    ${NPM_INSTALLDIR} \
 "
 
 EXPORT_FUNCTIONS do_compile do_install
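
With NPM_INSTALL_DEV now weakly assigned, a recipe can override it, e.g.:

    NPM_INSTALL_DEV = "1"    # also install devDependencies, not just production deps

Modules now land under ${libdir}/node/${NPMPN} rather than
${libdir}/node_modules.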
diff --git a/poky/meta/classes/package.bbclass b/poky/meta/classes/package.bbclass
index edeffa9..d1e9138 100644
--- a/poky/meta/classes/package.bbclass
+++ b/poky/meta/classes/package.bbclass
@@ -58,7 +58,7 @@
 
 # If your postinstall can execute at rootfs creation time rather than on
 # target but depends on a native/cross tool in order to execute, you need to
-# list that tool in PACKAGE_WRITE_DEPENDS. Target package dependencies belong
+# list that tool in PACKAGE_WRITE_DEPS. Target package dependencies belong
 # in the package dependencies as normal, this is just for native/cross support
 # tools at rootfs build time.
 PACKAGE_WRITE_DEPS ??= ""
@@ -345,8 +345,16 @@
     return debugfiles.keys()
 
 def append_source_info(file, sourcefile, d, fatal=True):
-    cmd = "'dwarfsrcfiles' '%s'" % (file)
-    (retval, output) = oe.utils.getstatusoutput(cmd)
+    import subprocess
+
+    cmd = ["dwarfsrcfiles", file]
+    try:
+        output = subprocess.check_output(cmd, universal_newlines=True, stderr=subprocess.STDOUT)
+        retval = 0
+    except subprocess.CalledProcessError as exc:
+        output = exc.output
+        retval = exc.returncode
+
     # 255 means a specific file wasn't fully parsed to get the debug file list, which is not a fatal failure
     if retval != 0 and retval != 255:
         msg = "dwarfsrcfiles failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else "")
@@ -359,10 +367,12 @@
     # of rpm's debugedit, which was writing them out that way, and the code elsewhere
     # is still assuming that.
     debuglistoutput = '\0'.join(debugsources) + '\0'
+    lf = bb.utils.lockfile(sourcefile + ".lock")
     open(sourcefile, 'a').write(debuglistoutput)
+    bb.utils.unlockfile(lf)
 
 
-def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
+def splitdebuginfo(file, dvar, debugdir, debuglibdir, debugappend, debugsrcdir, sourcefile, d):
     # Function to split a single file into two components, one is the stripped
     # target system binary, the other contains any debugging information. The
     # two files are linked to reference each other.
@@ -370,6 +380,16 @@
     # sourcefile is also generated containing a list of debugsources
 
     import stat
+    import subprocess
+
+    src = file[len(dvar):]
+    dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+    debugfile = dvar + dest
+
+    # Split the file...
+    bb.utils.mkdirhier(os.path.dirname(debugfile))
+    #bb.note("Split %s -> %s" % (file, debugfile))
+    # Only store off the hard link reference if we successfully split!
 
     dvar = d.getVar('PKGD')
     objcopy = d.getVar("OBJCOPY")
@@ -390,16 +410,10 @@
 
     bb.utils.mkdirhier(os.path.dirname(debugfile))
 
-    cmd = "'%s' --only-keep-debug '%s' '%s'" % (objcopy, file, debugfile)
-    (retval, output) = oe.utils.getstatusoutput(cmd)
-    if retval:
-        bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+    subprocess.check_output([objcopy, '--only-keep-debug', file, debugfile], stderr=subprocess.STDOUT)
 
     # Set the debuglink to have the view of the file path on the target
-    cmd = "'%s' --add-gnu-debuglink='%s' '%s'" % (objcopy, debugfile, file)
-    (retval, output) = oe.utils.getstatusoutput(cmd)
-    if retval:
-        bb.fatal("objcopy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+    subprocess.check_output([objcopy, '--add-gnu-debuglink', debugfile, file], stderr=subprocess.STDOUT)
 
     if newmode:
         os.chmod(file, origmode)
@@ -411,6 +425,7 @@
     # and copied to the destination here.
 
     import stat
+    import subprocess
 
     sourcefile = d.expand("${WORKDIR}/debugsources.list")
     if debugsrcdir and os.path.isfile(sourcefile):
@@ -447,23 +462,20 @@
         processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
 
         cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
-        (retval, output) = oe.utils.getstatusoutput(cmd)
-        # Can "fail" if internal headers/transient sources are attempted
-        #if retval:
-        #    bb.fatal("debug source copy failed with exit code %s (cmd was %s)" % (retval, cmd))
+        try:
+            subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError:
+            # Can "fail" if internal headers/transient sources are attempted
+            pass
 
         # cpio seems to have a bug with -lL together and symbolic links are just copied, not dereferenced.
         # Work around this by manually finding and copying any symbolic links that made it through.
         cmd = "find %s%s -type l -print0 -delete | sed s#%s%s/##g | (cd '%s' ; cpio -pd0mL --no-preserve-owner '%s%s' 2>/dev/null)" % (dvar, debugsrcdir, dvar, debugsrcdir, workparentdir, dvar, debugsrcdir)
-        (retval, output) = oe.utils.getstatusoutput(cmd)
-        if retval:
-            bb.fatal("debugsrc symlink fixup failed with exit code %s (cmd was %s)" % (retval, cmd))
+        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
 
         # The copy by cpio may have resulted in some empty directories!  Remove these
         cmd = "find %s%s -empty -type d -delete" % (dvar, debugsrcdir)
-        (retval, output) = oe.utils.getstatusoutput(cmd)
-        if retval:
-            bb.fatal("empty directory removal failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
 
         # Also remove debugsrcdir if its empty
         for p in nosuchdir[::-1]:
@@ -482,7 +494,8 @@
 
     if key in data:
         # Have to avoid undoing the write_extra_pkgs(global_variants...)
-        if bb.data.inherits_class('allarch', d) and data[key] == basepkg:
+        if bb.data.inherits_class('allarch', d) and not d.getVar('MULTILIB_VARIANTS') \
+            and data[key] == basepkg:
             return pkg
         return data[key]
 
@@ -624,16 +637,16 @@
 }
 
 python perform_packagecopy () {
+    import subprocess
+
     dest = d.getVar('D')
     dvar = d.getVar('PKGD')
 
     # Start by package population by taking a copy of the installed
     # files to operate on
     # Preserve sparse files and hard links
-    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (dest, dvar)
-    (retval, output) = oe.utils.getstatusoutput(cmd)
-    if retval:
-        bb.fatal("file copy failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
+    cmd = 'tar -cf - -C %s -p -S . | tar -xf - -C %s' % (dest, dvar)
+    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
 
     # replace RPATHs for the nativesdk binaries, to make them relocatable
     if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
@@ -762,7 +775,11 @@
         bbpath = d.getVar('BBPATH')
         fs_perms_tables = d.getVar('FILESYSTEM_PERMS_TABLES') or ""
         for conf_file in fs_perms_tables.split():
-            str += " %s" % bb.utils.which(bbpath, conf_file)
+            confpath = bb.utils.which(bbpath, conf_file)
+            if confpath:
+                str += " %s" % bb.utils.which(bbpath, conf_file)
+            else:
+                bb.warn("cannot find %s specified in FILESYSTEM_PERMS_TABLES" % conf_file)
         return str
 
 
@@ -880,6 +897,7 @@
 
 python split_and_strip_files () {
     import stat, errno
+    import subprocess
 
     dvar = d.getVar('PKGD')
     pn = d.getVar('PN')
@@ -916,43 +934,6 @@
     sourcefile = d.expand("${WORKDIR}/debugsources.list")
     bb.utils.remove(sourcefile)
 
-    # Return type (bits):
-    # 0 - not elf
-    # 1 - ELF
-    # 2 - stripped
-    # 4 - executable
-    # 8 - shared library
-    # 16 - kernel module
-    def isELF(path):
-        type = 0
-        ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
-
-        if ret:
-            msg = "split_and_strip_files: 'file %s' failed" % path
-            package_qa_handle_error("split-strip", msg, d)
-            return type
-
-        # Not stripped
-        if "ELF" in result:
-            type |= 1
-            if "not stripped" not in result:
-                type |= 2
-            if "executable" in result:
-                type |= 4
-            if "shared" in result:
-                type |= 8
-        return type
-
-    def isStaticLib(path):
-        if path.endswith('.a') and not os.path.islink(path):
-            with open(path, 'rb') as fh:
-                # The magic must include the first slash to avoid
-                # matching golang static libraries
-                magic = b'!<arch>\x0a/'
-                start = fh.read(len(magic))
-                return start == magic
-        return False
-
     #
     # First lets figure out all of the files we may have to process ... do this only once!
     #
@@ -966,13 +947,15 @@
     skipfiles = (d.getVar("INHIBIT_PACKAGE_STRIP_FILES") or "").split()
     if (d.getVar('INHIBIT_PACKAGE_STRIP') != '1' or \
             d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
+        checkelf = {}
+        checkelflinks = {}
         for root, dirs, files in cpath.walk(dvar):
             for f in files:
                 file = os.path.join(root, f)
                 if file.endswith(".ko") and file.find("/lib/modules/") != -1:
                     kernmods.append(file)
                     continue
-                if isStaticLib(file):
+                if oe.package.is_static_lib(file):
                     staticlibs.append(file)
                     continue
 
@@ -999,59 +982,63 @@
                 # Check its an executable
                 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
                         or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
-                    # If it's a symlink, and points to an ELF file, we capture the readlink target
+
                     if cpath.islink(file):
-                        target = os.readlink(file)
-                        if isELF(ltarget):
-                            #bb.note("Sym: %s (%d)" % (ltarget, isELF(ltarget)))
-                            symlinks[file] = target
+                        checkelflinks[file] = ltarget
                         continue
+                    # Use a reference of device ID and inode number to identify files
+                    file_reference = "%d_%d" % (s.st_dev, s.st_ino)
+                    checkelf[file] = (file, file_reference)
 
-                    # It's a file (or hardlink), not a link
-                    # ...but is it ELF, and is it already stripped?
-                    elf_file = isELF(file)
-                    if elf_file & 1:
-                        if elf_file & 2:
-                            if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
-                                bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
-                            else:
-                                msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
-                                package_qa_handle_error("already-stripped", msg, d)
-                            continue
+        results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelflinks.values(), d)
+        results_map = {}
+        for (ltarget, elf_file) in results:
+            results_map[ltarget] = elf_file
+        for file in checkelflinks:
+            ltarget = checkelflinks[file]
+            # If it's a symlink, and points to an ELF file, we capture the readlink target
+            if results_map[ltarget]:
+                target = os.readlink(file)
+                #bb.note("Sym: %s (%d)" % (ltarget, results_map[ltarget]))
+                symlinks[file] = target
 
-                        # At this point we have an unstripped elf file. We need to:
-                        #  a) Make sure any file we strip is not hardlinked to anything else outside this tree
-                        #  b) Only strip any hardlinked file once (no races)
-                        #  c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+        results = oe.utils.multiprocess_launch(oe.package.is_elf, checkelf.keys(), d)
+        for (file, elf_file) in results:
+            # It's a file (or hardlink), not a link
+            # ...but is it ELF, and is it already stripped?
+            if elf_file & 1:
+                if elf_file & 2:
+                    if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split():
+                        bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+                    else:
+                        msg = "File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn)
+                        package_qa_handle_error("already-stripped", msg, d)
+                    continue
 
-                        # Use a reference of device ID and inode number to identify files
-                        file_reference = "%d_%d" % (s.st_dev, s.st_ino)
-                        if file_reference in inodes:
-                            os.unlink(file)
-                            os.link(inodes[file_reference][0], file)
-                            inodes[file_reference].append(file)
-                        else:
-                            inodes[file_reference] = [file]
-                            # break hardlink
-                            bb.utils.copyfile(file, file)
-                            elffiles[file] = elf_file
-                        # Modified the file so clear the cache
-                        cpath.updatecache(file)
+                # At this point we have an unstripped elf file. We need to:
+                #  a) Make sure any file we strip is not hardlinked to anything else outside this tree
+                #  b) Only strip any hardlinked file once (no races)
+                #  c) Track any hardlinks between files so that we can reconstruct matching debug file hardlinks
+
+                # Use a reference of device ID and inode number to identify files
+                file_reference = checkelf[file][1]
+                if file_reference in inodes:
+                    os.unlink(file)
+                    os.link(inodes[file_reference][0], file)
+                    inodes[file_reference].append(file)
+                else:
+                    inodes[file_reference] = [file]
+                    # break hardlink
+                    bb.utils.break_hardlinks(file)
+                    elffiles[file] = elf_file
+                # Modified the file so clear the cache
+                cpath.updatecache(file)
 
     #
     # First lets process debug splitting
     #
     if (d.getVar('INHIBIT_PACKAGE_DEBUG_SPLIT') != '1'):
-        for file in elffiles:
-            src = file[len(dvar):]
-            dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
-            fpath = dvar + dest
-
-            # Split the file...
-            bb.utils.mkdirhier(os.path.dirname(fpath))
-            #bb.note("Split %s -> %s" % (file, fpath))
-            # Only store off the hard link reference if we successfully split!
-            splitdebuginfo(file, fpath, debugsrcdir, sourcefile, d)
+        oe.utils.multiprocess_launch(splitdebuginfo, list(elffiles), d, extraargs=(dvar, debugdir, debuglibdir, debugappend, debugsrcdir, sourcefile, d))
 
         if debugsrcdir and not targetos.startswith("mingw"):
             for file in staticlibs:
@@ -1061,15 +1048,18 @@
         for ref in inodes:
             if len(inodes[ref]) == 1:
                 continue
+
+            target = inodes[ref][0][len(dvar):]
             for file in inodes[ref][1:]:
                 src = file[len(dvar):]
-                dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(src) + debugappend
+                dest = debuglibdir + os.path.dirname(src) + debugdir + "/" + os.path.basename(target) + debugappend
                 fpath = dvar + dest
-                target = inodes[ref][0][len(dvar):]
                 ftarget = dvar + debuglibdir + os.path.dirname(target) + debugdir + "/" + os.path.basename(target) + debugappend
                 bb.utils.mkdirhier(os.path.dirname(fpath))
-                #bb.note("Link %s -> %s" % (fpath, ftarget))
-                os.link(ftarget, fpath)
+                # Only create one hardlink to the separated debug info file in each directory
+                if not os.access(fpath, os.R_OK):
+                    #bb.note("Link %s -> %s" % (fpath, ftarget))
+                    os.link(ftarget, fpath)
 
         # Create symlinks for all cases we were able to split symbols
         for file in symlinks:
@@ -1118,7 +1108,7 @@
         for f in kernmods:
             sfiles.append((f, 16, strip))
 
-        oe.utils.multiprocess_exec(sfiles, oe.package.runstrip)
+        oe.utils.multiprocess_launch(oe.package.runstrip, sfiles, d)
 
     #
     # End of strip
@@ -1151,21 +1141,22 @@
 
     # Sanity check PACKAGES for duplicates
     # Sanity should be moved to sanity.bbclass once we have the infrastructure
-    package_list = []
+    package_dict = {}
 
-    for pkg in packages.split():
-        if pkg in package_list:
+    for i, pkg in enumerate(packages.split()):
+        if pkg in package_dict:
             msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
             package_qa_handle_error("packages-list", msg, d)
         # If debug-with-srcpkg mode is enabled then the src package will have
         # priority over dbg package when assigning the files.
         # This allows src package to include source files and remove them from dbg.
         elif split_source_package and pkg.endswith("-src"):
-            package_list.insert(0, pkg)
-        elif autodebug and pkg.endswith("-dbg") and not split_source_package:
-            package_list.insert(0, pkg)
+            package_dict[pkg] = (10, i)
+        elif autodebug and pkg.endswith("-dbg"):
+            package_dict[pkg] = (30, i)
         else:
-            package_list.append(pkg)
+            package_dict[pkg] = (50, i)
+    package_list = sorted(package_dict.keys(), key=package_dict.get)
     d.setVar('PACKAGES', ' '.join(package_list))
     pkgdest = d.getVar('PKGDEST')
 
@@ -1411,9 +1402,6 @@
     pkgdest = d.getVar('PKGDEST')
     pkgdatadir = d.getVar('PKGDESTWORK')
 
-    # Take shared lock since we're only reading, not writing
-    lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
-
     data_file = pkgdatadir + d.expand("/${PN}" )
     f = open(data_file, 'w')
     f.write("PACKAGES: %s\n" % packages)
@@ -1426,7 +1414,8 @@
     if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
         write_extra_pkgs(variants, pn, packages, pkgdatadir)
 
-    if (bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d)):
+    if bb.data.inherits_class('allarch', d) and not variants \
+        and not bb.data.inherits_class('packagegroup', d):
         write_extra_pkgs(global_variants, pn, packages, pkgdatadir)
 
     workdir = d.getVar('WORKDIR')
@@ -1515,10 +1504,10 @@
     if bb.data.inherits_class('kernel', d) or bb.data.inherits_class('module-base', d):
         write_extra_runtime_pkgs(variants, packages, pkgdatadir)
 
-    if bb.data.inherits_class('allarch', d) and not bb.data.inherits_class('packagegroup', d):
+    if bb.data.inherits_class('allarch', d) and not variants \
+        and not bb.data.inherits_class('packagegroup', d):
         write_extra_runtime_pkgs(global_variants, packages, pkgdatadir)
 
-    bb.utils.unlockfile(lf)
 }
 emit_pkgdata[dirs] = "${PKGDESTWORK}/runtime ${PKGDESTWORK}/runtime-reverse ${PKGDESTWORK}/runtime-rprovides"
 
@@ -1558,7 +1547,7 @@
         for files in chunks(pkgfiles[pkg], 100):
             pkglist.append((pkg, files, rpmdeps, pkgdest))
 
-    processed = oe.utils.multiprocess_exec( pkglist, oe.package.filedeprunner)
+    processed = oe.utils.multiprocess_launch(oe.package.filedeprunner, pkglist, d)
 
     provides_files = {}
     requires_files = {}
@@ -1574,12 +1563,12 @@
         for file in provides:
             provides_files[pkg].append(file)
             key = "FILERPROVIDES_" + file + "_" + pkg
-            d.setVar(key, " ".join(provides[file]))
+            d.appendVar(key, " " + " ".join(provides[file]))
 
         for file in requires:
             requires_files[pkg].append(file)
             key = "FILERDEPENDS_" + file + "_" + pkg
-            d.setVar(key, " ".join(requires[file]))
+            d.appendVar(key, " " + " ".join(requires[file]))
 
     for pkg in requires_files:
         d.setVar("FILERDEPENDSFLIST_" + pkg, " ".join(requires_files[pkg]))
@@ -1592,7 +1581,7 @@
 
 python package_do_shlibs() {
     import re, pipes
-    import subprocess as sub
+    import subprocess
 
     exclude_shlibs = d.getVar('EXCLUDE_FROM_SHLIBS', False)
     if exclude_shlibs:
@@ -1603,6 +1592,18 @@
     libdir_re = re.compile(".*/%s$" % d.getVar('baselib'))
 
     packages = d.getVar('PACKAGES')
+
+    shlib_pkgs = []
+    exclusion_list = d.getVar("EXCLUDE_PACKAGES_FROM_SHLIBS")
+    if exclusion_list:
+        for pkg in packages.split():
+            if pkg not in exclusion_list.split():
+                shlib_pkgs.append(pkg)
+            else:
+                bb.note("not generating shlibs for %s" % pkg)
+    else:
+        shlib_pkgs = packages.split()
+
     targetos = d.getVar('TARGET_OS')
 
     workdir = d.getVar('WORKDIR')
@@ -1617,28 +1618,28 @@
 
     shlibswork_dir = d.getVar('SHLIBSWORKDIR')
 
-    # Take shared lock since we're only reading, not writing
-    lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
-
-    def linux_so(file, needed, sonames, renames, pkgver):
+    def linux_so(file, pkg, pkgver, d):
         needs_ldconfig = False
+        needed = set()
+        sonames = set()
+        renames = []
         ldir = os.path.dirname(file).replace(pkgdest + "/" + pkg, '')
         cmd = d.getVar('OBJDUMP') + " -p " + pipes.quote(file) + " 2>/dev/null"
         fd = os.popen(cmd)
         lines = fd.readlines()
         fd.close()
-        rpath = []
+        rpath = tuple()
         for l in lines:
             m = re.match("\s+RPATH\s+([^\s]*)", l)
             if m:
                 rpaths = m.group(1).replace("$ORIGIN", ldir).split(":")
-                rpath = list(map(os.path.normpath, rpaths))
+                rpath = tuple(map(os.path.normpath, rpaths))
         for l in lines:
             m = re.match("\s+NEEDED\s+([^\s]*)", l)
             if m:
                 dep = m.group(1)
-                if dep not in needed[pkg]:
-                    needed[pkg].append((dep, file, rpath))
+                if dep not in needed:
+                    needed.add((dep, file, rpath))
             m = re.match("\s+SONAME\s+([^\s]*)", l)
             if m:
                 this_soname = m.group(1)
@@ -1646,12 +1647,12 @@
                 if not prov in sonames:
                     # if library is private (only used by package) then do not build shlib for it
                     if not private_libs or this_soname not in private_libs:
-                        sonames.append(prov)
+                        sonames.add(prov)
                 if libdir_re.match(os.path.dirname(file)):
                     needs_ldconfig = True
                 if snap_symlinks and (os.path.basename(file) != this_soname):
                     renames.append((file, os.path.join(os.path.dirname(file), this_soname)))
-        return needs_ldconfig
+        return (needs_ldconfig, needed, sonames, renames)
 
     def darwin_so(file, needed, sonames, renames, pkgver):
         if not os.path.exists(file):
@@ -1679,10 +1680,10 @@
             for combo in combos:
                 if not combo in sonames:
                     prov = (combo, ldir, pkgver)
-                    sonames.append(prov)
+                    sonames.add(prov)
         if file.endswith('.dylib') or file.endswith('.so'):
             rpath = []
-            p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
+            p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
             out, err = p.communicate()
             # If returned successfully, process stdout for results
             if p.returncode == 0:
@@ -1691,7 +1692,7 @@
                     if l.startswith('path '):
                         rpath.append(l.split()[1])
 
-        p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
+        p = subprocess.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         out, err = p.communicate()
         # If returned successfully, process stdout for results
         if p.returncode == 0:
@@ -1703,7 +1704,7 @@
                     continue
                 name = os.path.basename(l.split()[0]).rsplit(".", 1)[0]
                 if name and name not in needed[pkg]:
-                     needed[pkg].append((name, file, []))
+                     needed[pkg].add((name, file, tuple()))
 
     def mingw_dll(file, needed, sonames, renames, pkgver):
         if not os.path.exists(file):
@@ -1711,18 +1712,18 @@
 
         if file.endswith(".dll"):
             # assume all dlls are shared objects provided by the package
-            sonames.append((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
+            sonames.add((os.path.basename(file), os.path.dirname(file).replace(pkgdest + "/" + pkg, ''), pkgver))
 
         if (file.endswith(".dll") or file.endswith(".exe")):
             # use objdump to search for "DLL Name: .*\.dll"
-            p = sub.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout = sub.PIPE, stderr= sub.PIPE)
+            p = subprocess.Popen([d.expand("${HOST_PREFIX}objdump"), "-p", file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
             out, err = p.communicate()
             # process the output, grabbing all .dll names
             if p.returncode == 0:
                 for m in re.finditer("DLL Name: (.*?\.dll)$", out.decode(), re.MULTILINE | re.IGNORECASE):
                     dllname = m.group(1)
                     if dllname:
-                        needed[pkg].append((dllname, file, []))
+                        needed[pkg].add((dllname, file, tuple()))
 
     if d.getVar('PACKAGE_SNAP_LIB_SYMLINKS') == "1":
         snap_symlinks = True
@@ -1732,9 +1733,13 @@
     use_ldconfig = bb.utils.contains('DISTRO_FEATURES', 'ldconfig', True, False, d)
 
     needed = {}
-    shlib_provider = oe.package.read_shlib_providers(d)
 
-    for pkg in packages.split():
+    # Take shared lock since we're only reading, not writing
+    lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
+    shlib_provider = oe.package.read_shlib_providers(d)
+    bb.utils.unlockfile(lf)
+
+    for pkg in shlib_pkgs:
         private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
         private_libs = private_libs.split()
         needs_ldconfig = False
@@ -1746,9 +1751,10 @@
         if not pkgver:
             pkgver = ver
 
-        needed[pkg] = []
-        sonames = list()
-        renames = list()
+        needed[pkg] = set()
+        sonames = set()
+        renames = []
+        linuxlist = []
         for file in pkgfiles[pkg]:
                 soname = None
                 if cpath.islink(file):
@@ -1758,8 +1764,17 @@
                 elif targetos.startswith("mingw"):
                     mingw_dll(file, needed, sonames, renames, pkgver)
                 elif os.access(file, os.X_OK) or lib_re.match(file):
-                    ldconfig = linux_so(file, needed, sonames, renames, pkgver)
-                    needs_ldconfig = needs_ldconfig or ldconfig
+                    linuxlist.append(file)
+
+        if linuxlist:
+            results = oe.utils.multiprocess_launch(linux_so, linuxlist, d, extraargs=(pkg, pkgver, d))
+            for r in results:
+                ldconfig = r[0]
+                needed[pkg] |= r[1]
+                sonames |= r[2]
+                renames.extend(r[3])
+                needs_ldconfig = needs_ldconfig or ldconfig
+
         for (old, new) in renames:
             bb.note("Renaming %s to %s" % (old, new))
             os.rename(old, new)
@@ -1788,8 +1803,6 @@
             d.setVar('pkg_postinst_%s' % pkg, postinst)
         bb.debug(1, 'LIBNAMES: pkg %s sonames %s' % (pkg, sonames))
 
-    bb.utils.unlockfile(lf)
-
     assumed_libs = d.getVar('ASSUME_SHLIBS')
     if assumed_libs:
         libdir = d.getVar("libdir")
@@ -1806,7 +1819,7 @@
 
     libsearchpath = [d.getVar('libdir'), d.getVar('base_libdir')]
 
-    for pkg in packages.split():
+    for pkg in shlib_pkgs:
         bb.debug(2, "calculating shlib requirements for %s" % pkg)
 
         private_libs = d.getVar('PRIVATE_LIBS_' + pkg) or d.getVar('PRIVATE_LIBS') or ""
@@ -1827,7 +1840,7 @@
                 for k in shlib_provider[n[0]].keys():
                     shlib_provider_path.append(k)
                 match = None
-                for p in n[2] + shlib_provider_path + libsearchpath:
+                for p in list(n[2]) + shlib_provider_path + libsearchpath:
                     if p in shlib_provider[n[0]]:
                         match = p
                         break
@@ -1902,9 +1915,6 @@
                             if hdr == 'Requires':
                                 pkgconfig_needed[pkg] += exp.replace(',', ' ').split()
 
-    # Take shared lock since we're only reading, not writing
-    lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
-
     for pkg in packages.split():
         pkgs_file = os.path.join(shlibswork_dir, pkg + ".pclist")
         if pkgconfig_provided[pkg] != []:
@@ -1913,6 +1923,9 @@
                 f.write('%s\n' % p)
             f.close()
 
+    # Take shared lock since we're only reading, not writing
+    lf = bb.utils.lockfile(d.expand("${PACKAGELOCK}"), True)
+
     # Go from least to most specific since the last one found wins
     for dir in reversed(shlibs_dirs):
         if not os.path.exists(dir):
@@ -1928,6 +1941,8 @@
                 for l in lines:
                     pkgconfig_provided[pkg].append(l.rstrip())
 
+    bb.utils.unlockfile(lf)
+
     for pkg in packages.split():
         deps = []
         for n in pkgconfig_needed[pkg]:
@@ -1945,8 +1960,6 @@
             for dep in deps:
                 fd.write(dep + '\n')
             fd.close()
-
-    bb.utils.unlockfile(lf)
 }
 
 def read_libdep_files(d):
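
The merge loop above expects each parallel linux_so() call to return a 4-tuple of (needs_ldconfig, needed, sonames, renames), with needed and sonames as sets of hashable tuples so results from separate worker processes can be unioned. A minimal sketch of a compatible worker, matching the extraargs passed to multiprocess_launch (the actual ELF inspection is elided):

    def linux_so(file, pkg, pkgver, d):
        needed = set()          # (name, file, rpath-tuple) entries; tuples, not lists, so they hash
        sonames = set()         # (soname, subdir, pkgver) entries
        renames = []            # (old, new) pairs; order matters, so a list
        needs_ldconfig = False
        # ... inspect 'file' with objdump/readelf and populate the collections ...
        return (needs_ldconfig, needed, sonames, renames)
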
diff --git a/poky/meta/classes/package_deb.bbclass b/poky/meta/classes/package_deb.bbclass
index 2e8d17d..6f81591 100644
--- a/poky/meta/classes/package_deb.bbclass
+++ b/poky/meta/classes/package_deb.bbclass
@@ -41,32 +41,6 @@
     return arch
 
 python do_package_deb () {
-
-    import multiprocessing
-    import traceback
-
-    class DebianWritePkgProcess(multiprocessing.Process):
-        def __init__(self, *args, **kwargs):
-            multiprocessing.Process.__init__(self, *args, **kwargs)
-            self._pconn, self._cconn = multiprocessing.Pipe()
-            self._exception = None
-
-        def run(self):
-            try:
-                multiprocessing.Process.run(self)
-                self._cconn.send(None)
-            except Exception as e:
-                tb = traceback.format_exc()
-                self._cconn.send((e, tb))
-
-        @property
-        def exception(self):
-            if self._pconn.poll():
-                self._exception = self._pconn.recv()
-            return self._exception
-
-    oldcwd = os.getcwd()
-
     packages = d.getVar('PACKAGES')
     if not packages:
         bb.debug(1, "PACKAGES not defined, nothing to package")
@@ -76,30 +50,7 @@
     if os.access(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"),os.R_OK):
         os.unlink(os.path.join(tmpdir, "stamps", "DEB_PACKAGE_INDEX_CLEAN"))
 
-    max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
-    launched = []
-    error = None
-    pkgs = packages.split()
-    while not error and pkgs:
-        if len(launched) < max_process:
-            p = DebianWritePkgProcess(target=deb_write_pkg, args=(pkgs.pop(), d))
-            p.start()
-            launched.append(p)
-        for q in launched:
-            # The finished processes are joined when calling is_alive()
-            if not q.is_alive():
-                launched.remove(q)
-            if q.exception:
-                error, traceback = q.exception
-                break
-
-    for p in launched:
-        p.join()
-
-    os.chdir(oldcwd)
-
-    if error:
-        raise error
+    oe.utils.multiprocess_launch(deb_write_pkg, packages.split(), d, extraargs=(d,))
 }
 do_package_deb[vardeps] += "deb_write_pkg"
 do_package_deb[vardepsexclude] = "BB_NUMBER_THREADS"
diff --git a/poky/meta/classes/package_ipk.bbclass b/poky/meta/classes/package_ipk.bbclass
index a0b34fa..5eb910c 100644
--- a/poky/meta/classes/package_ipk.bbclass
+++ b/poky/meta/classes/package_ipk.bbclass
@@ -17,32 +17,6 @@
 OPKGLIBDIR = "${localstatedir}/lib"
 
 python do_package_ipk () {
-    import multiprocessing
-    import traceback
-
-    class IPKWritePkgProcess(multiprocessing.Process):
-        def __init__(self, *args, **kwargs):
-            multiprocessing.Process.__init__(self, *args, **kwargs)
-            self._pconn, self._cconn = multiprocessing.Pipe()
-            self._exception = None
-
-        def run(self):
-            try:
-                multiprocessing.Process.run(self)
-                self._cconn.send(None)
-            except Exception as e:
-                tb = traceback.format_exc()
-                self._cconn.send((e, tb))
-
-        @property
-        def exception(self):
-            if self._pconn.poll():
-                self._exception = self._pconn.recv()
-            return self._exception
-
-
-    oldcwd = os.getcwd()
-
     workdir = d.getVar('WORKDIR')
     outdir = d.getVar('PKGWRITEDIRIPK')
     tmpdir = d.getVar('TMPDIR')
@@ -61,30 +35,7 @@
     if os.access(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"), os.R_OK):
         os.unlink(os.path.join(tmpdir, "stamps", "IPK_PACKAGE_INDEX_CLEAN"))
 
-    max_process = int(d.getVar("BB_NUMBER_THREADS") or os.cpu_count() or 1)
-    launched = []
-    error = None
-    pkgs = packages.split()
-    while not error and pkgs:
-        if len(launched) < max_process:
-            p = IPKWritePkgProcess(target=ipk_write_pkg, args=(pkgs.pop(), d))
-            p.start()
-            launched.append(p)
-        for q in launched:
-            # The finished processes are joined when calling is_alive()
-            if not q.is_alive():
-                launched.remove(q)
-            if q.exception:
-                error, traceback = q.exception
-                break
-
-    for p in launched:
-        p.join()
-
-    os.chdir(oldcwd)
-
-    if error:
-        raise error
+    oe.utils.multiprocess_launch(ipk_write_pkg, packages.split(), d, extraargs=(d,))
 }
 do_package_ipk[vardeps] += "ipk_write_pkg"
 do_package_ipk[vardepsexclude] = "BB_NUMBER_THREADS"
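
Both the deb and ipk writers now delegate process fan-out, joining and error propagation to the same helper. A minimal usage sketch of that pattern, assuming only the call shape visible above (each item is passed to the worker followed by extraargs, and a failure in any child is re-raised in the parent):

    def write_pkg(pkg, d):
        # runs in a forked worker; uncaught exceptions propagate back to the parent task
        bb.note("packaging %s" % pkg)

    oe.utils.multiprocess_launch(write_pkg, d.getVar('PACKAGES').split(), d, extraargs=(d,))
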
diff --git a/poky/meta/classes/patch.bbclass b/poky/meta/classes/patch.bbclass
index 2fc6925..3e0a181 100644
--- a/poky/meta/classes/patch.bbclass
+++ b/poky/meta/classes/patch.bbclass
@@ -33,7 +33,7 @@
     if (testsrcdir + os.sep).startswith(workdir + os.sep):
         # Double-check that either workdir or S or some directory in-between is a git repository
         found = False
-        while testsrcdir != '/':
+        while testsrcdir != workdir:
             if os.path.exists(os.path.join(testsrcdir, '.git')):
                 found = True
                 break
diff --git a/poky/meta/classes/pixbufcache.bbclass b/poky/meta/classes/pixbufcache.bbclass
index b3e507f..3378ff2 100644
--- a/poky/meta/classes/pixbufcache.bbclass
+++ b/poky/meta/classes/pixbufcache.bbclass
@@ -12,7 +12,7 @@
 
 pixbufcache_common() {
 if [ "x$D" != "x" ]; then
-	$INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} libdir=${libdir} \
+	$INTERCEPT_DIR/postinst_intercept update_pixbuf_cache ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} libdir=${libdir} \
 		bindir=${bindir} base_libdir=${base_libdir}
 else
 
diff --git a/poky/meta/classes/populate_sdk_base.bbclass b/poky/meta/classes/populate_sdk_base.bbclass
index 3da3507..677ba3c 100644
--- a/poky/meta/classes/populate_sdk_base.bbclass
+++ b/poky/meta/classes/populate_sdk_base.bbclass
@@ -1,4 +1,4 @@
-inherit meta
+inherit meta image-postinst-intercepts
 
 # Wildcards specifying complementary packages to install for every package that has been explicitly
 # installed into the rootfs
@@ -40,13 +40,13 @@
 
 TOOLCHAIN_HOST_TASK ?= "nativesdk-packagegroup-sdk-host packagegroup-cross-canadian-${MACHINE}"
 TOOLCHAIN_HOST_TASK_ATTEMPTONLY ?= ""
-TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} \
-                          ${@multilib_pkg_extend(d, 'target-sdk-provides-dummy')}"
+TOOLCHAIN_TARGET_TASK ?= "${@multilib_pkg_extend(d, 'packagegroup-core-standalone-sdk-target')} target-sdk-provides-dummy"
 TOOLCHAIN_TARGET_TASK_ATTEMPTONLY ?= ""
 TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
 
 SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native xz-native cross-localedef-native ${MLPREFIX}qemuwrapper-cross"
+SDK_DEPENDS = "virtual/fakeroot-native xz-native cross-localedef-native nativesdk-qemuwrapper-cross ${@' '.join(["%s-qemuwrapper-cross" % m for m in d.getVar("MULTILIB_VARIANTS").split()])} qemuwrapper-cross"
+PATH_prepend = "${STAGING_DIR_HOST}${SDKPATHNATIVE}${bindir}/crossscripts:${@":".join(all_multilib_tune_values(d, 'STAGING_BINDIR_CROSS').split())}:"
 SDK_DEPENDS_append_libc-glibc = " nativesdk-glibc-locale"
 
 # We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
@@ -62,8 +62,8 @@
 SDK_POST_INSTALL_COMMAND ?= ""
 SDK_RELOCATE_AFTER_INSTALL ?= "1"
 
-SDKEXTPATH ?= "~/${@d.getVar('DISTRO')}_sdk"
-SDK_TITLE ?= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
+SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk"
+SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK"
 
 SDK_TARGET_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.target.manifest"
 SDK_HOST_MANIFEST = "${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
@@ -228,14 +228,17 @@
 	tar ${SDKTAROPTS} -cf - . | xz -T 0 > ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
 }
 
+TOOLCHAIN_SHAR_EXT_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-extract.sh"
+TOOLCHAIN_SHAR_REL_TMPL ?= "${COREBASE}/meta/files/toolchain-shar-relocate.sh"
+
 fakeroot create_shar() {
 	# copy in the template shar extractor script
-	cp ${COREBASE}/meta/files/toolchain-shar-extract.sh ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
+	cp ${TOOLCHAIN_SHAR_EXT_TMPL} ${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
 
 	rm -f ${T}/pre_install_command ${T}/post_install_command
 
 	if [ ${SDK_RELOCATE_AFTER_INSTALL} -eq 1 ] ; then
-		cp ${COREBASE}/meta/files/toolchain-shar-relocate.sh ${T}/post_install_command
+		cp ${TOOLCHAIN_SHAR_REL_TMPL} ${T}/post_install_command
 	fi
 	cat << "EOF" >> ${T}/pre_install_command
 ${SDK_PRE_INSTALL_COMMAND}
@@ -258,7 +261,7 @@
 		-e 's#@SDK_VERSION@#${SDK_VERSION}#g' \
 		-e '/@SDK_PRE_INSTALL_COMMAND@/d' \
 		-e '/@SDK_POST_INSTALL_COMMAND@/d' \
-		-e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d)}#g' \
+		-e 's#@SDK_GCC_VER@#${@oe.utils.host_gcc_version(d, taskcontextonly=True)}#g' \
 		${SDKDEPLOYDIR}/${TOOLCHAIN_OUTPUTNAME}.sh
 
 	# add execution permission
@@ -293,17 +296,18 @@
 def sdk_variables(d):
     variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
                  'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
-                 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY']
+                 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI', 'PACKAGE_EXCLUDE_COMPLEMENTARY', 'IMAGE_INSTALL_DEBUGFS']
     variables.extend(sdk_command_variables(d))
     return " ".join(variables)
 
 do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
 
-do_populate_sdk[file-checksums] += "${COREBASE}/meta/files/toolchain-shar-relocate.sh:True \
-                                    ${COREBASE}/meta/files/toolchain-shar-extract.sh:True"
+do_populate_sdk[file-checksums] += "${TOOLCHAIN_SHAR_REL_TMPL}:True \
+                                    ${TOOLCHAIN_SHAR_EXT_TMPL}:True"
 
 do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
 do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS').split()])}  ${@d.getVarFlag('do_rootfs', 'depends', False)}"
 do_populate_sdk[rdepends] = "${@' '.join([x + ':do_package_write_${IMAGE_PKGTYPE} ' + x + ':do_packagedata' for x in d.getVar('SDK_RDEPENDS').split()])}"
 do_populate_sdk[recrdeptask] += "do_packagedata do_package_write_rpm do_package_write_ipk do_package_write_deb"
+do_populate_sdk[file-checksums] += "${POSTINST_INTERCEPT_CHECKSUMS}"
 addtask populate_sdk
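
Because the shar templates are now plain overridable variables (and still tracked via file-checksums), a distro or BSP layer can substitute its own installer scripts without copying the class; a hypothetical override (layer path illustrative):

    TOOLCHAIN_SHAR_EXT_TMPL = "${COREBASE}/../meta-mylayer/files/toolchain-shar-extract.sh"
    TOOLCHAIN_SHAR_REL_TMPL = "${COREBASE}/../meta-mylayer/files/toolchain-shar-relocate.sh"
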
diff --git a/poky/meta/classes/populate_sdk_ext.bbclass b/poky/meta/classes/populate_sdk_ext.bbclass
index e1bba49..e30c492 100644
--- a/poky/meta/classes/populate_sdk_ext.bbclass
+++ b/poky/meta/classes/populate_sdk_ext.bbclass
@@ -200,15 +200,9 @@
         workspace_name = 'orig-workspace'
     else:
         workspace_name = None
-    layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
 
-    sdkbblayers = []
-    corebase = os.path.basename(d.getVar('COREBASE'))
-    for layer in layers_copied:
-        if corebase == os.path.basename(layer):
-            conf_bbpath = os.path.join('layers', layer, 'bitbake')
-        else:
-            sdkbblayers.append(layer)
+    corebase, sdkbblayers = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
+    conf_bbpath = os.path.join('layers', corebase, 'bitbake')
 
     for path in os.listdir(baseoutpath + '/layers'):
         relpath = os.path.join('layers', path, oe_init_env_script)
@@ -325,8 +319,9 @@
             f.write('TCLIBCAPPEND = ""\n')
             f.write('DL_DIR = "${TOPDIR}/downloads"\n')
 
-            f.write('INHERIT += "%s"\n' % 'uninative')
-            f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
+            if bb.data.inherits_class('uninative', d):
+               f.write('INHERIT += "%s"\n' % 'uninative')
+               f.write('UNINATIVE_CHECKSUM[%s] = "%s"\n\n' % (d.getVar('BUILD_ARCH'), uninative_checksum))
             f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
 
             # Some classes are not suitable for SDK, remove them from INHERIT
@@ -536,7 +531,8 @@
 	scripts="devtool recipetool oe-find-native-sysroot runqemu* wic"
 	for script in $scripts; do
 		for scriptfn in `find ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath} -maxdepth 1 -executable -name "$script"`; do
-			lnr ${scriptfn} ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/`basename $scriptfn`
+			targetscriptfn="${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/$(basename $scriptfn)"
+			test -e ${targetscriptfn} || lnr ${scriptfn} ${targetscriptfn}
 		done
 	done
 	# We can't use the same method as above because files in the sysroot won't exist at this point
diff --git a/poky/meta/classes/python3native.bbclass b/poky/meta/classes/python3native.bbclass
index 89665ef..da12a71 100644
--- a/poky/meta/classes/python3native.bbclass
+++ b/poky/meta/classes/python3native.bbclass
@@ -1,8 +1,8 @@
 inherit python3-dir
 
-PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
-EXTRANATIVEPATH += "${PYTHON_PN}-native"
-DEPENDS_append = " ${PYTHON_PN}-native "
+PYTHON="${STAGING_BINDIR_NATIVE}/python3-native/python3"
+EXTRANATIVEPATH += "python3-native"
+DEPENDS_append = " python3-native "
 
 # python-config and other scripts are using distutils modules
 # which we patch to access these variables
diff --git a/poky/meta/classes/pythonnative.bbclass b/poky/meta/classes/pythonnative.bbclass
index 4cc8b27..ae6600c 100644
--- a/poky/meta/classes/pythonnative.bbclass
+++ b/poky/meta/classes/pythonnative.bbclass
@@ -1,11 +1,11 @@
 
 inherit python-dir
 
-PYTHON="${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN}"
+PYTHON="${STAGING_BINDIR_NATIVE}/python-native/python"
 # PYTHON_EXECUTABLE is used by cmake
 PYTHON_EXECUTABLE="${PYTHON}"
-EXTRANATIVEPATH += "${PYTHON_PN}-native"
-DEPENDS_append = " ${PYTHON_PN}-native "
+EXTRANATIVEPATH += "python-native"
+DEPENDS_append = " python-native "
 
 # python-config and other scripts are using distutils modules
 # which we patch to access these variables
diff --git a/poky/meta/classes/reproducible_build.bbclass b/poky/meta/classes/reproducible_build.bbclass
index 2df8053..0eb696a 100644
--- a/poky/meta/classes/reproducible_build.bbclass
+++ b/poky/meta/classes/reproducible_build.bbclass
@@ -1,33 +1,35 @@
-#
 # reproducible_build.bbclass
 #
-# This bbclass is mainly responsible to determine SOURCE_DATE_EPOCH on a per recipe base.
-# We need to set a recipe specific SOURCE_DATE_EPOCH in each recipe environment for various tasks.
-# One way would be to modify all recipes one-by-one to specify SOURCE_DATE_EPOCH explicitly, 
-# but that is not realistic as there are hundreds (probably thousands) of recipes in various meta-layers.
-# Therefore we do it this class. 
-# After sources are unpacked but before they are patched, we try to determine the value for SOURCE_DATE_EPOCH.
+# Sets SOURCE_DATE_EPOCH in each component's build environment.
+# Upstream components (generally) respect this environment variable,
+# using it in place of the "current" date and time.
+# See https://reproducible-builds.org/specs/source-date-epoch/
 #
-# There are 4 ways to determine SOURCE_DATE_EPOCH:
+# After sources are unpacked but before they are patched, we set a reproducible value for SOURCE_DATE_EPOCH.
+# This value should be reproducible for anyone who builds the same revision from the same sources.
 #
-# 1. Use value from __source_date_epoch.txt file if this file exists. 
-#    This file was most likely created in the previous build by one of the following methods 2,3,4. 
-#    In principle, it could actually provided by a recipe via SRC_URI
+# There are 4 ways we determine SOURCE_DATE_EPOCH:
 #
-# If the file does not exist, first try to determine the value for SOURCE_DATE_EPOCH:
+# 1. Use the value from __source_date_epoch.txt file if this file exists.
+#    This file was most likely created in the previous build by one of the following methods 2,3,4.
+#    Alternatively, it can be provided by a recipe via SRC_URI.
 #
-# 2. If we detected a folder .git, use .git last commit date timestamp, as git does not allow checking out
-#    files and preserving their timestamps.
+# If the file does not exist:
+#
+# 2. If there is a git checkout, use the last git commit timestamp.
+#    Git does not preserve file timestamps on checkout.
 #
 # 3. Use the mtime of "known" files such as NEWS, CHANGELOG, ...
-#    This will work fine for any well kept repository distributed via tarballs.
+#    This works for well-kept repositories distributed via tarball.
 #
-# 4. If the above steps fail, we need to check all package source files and use the youngest file of the source tree.
+# 4. If the above steps fail, use the modification time of the youngest file in the source tree.
 #
-# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe ${WORKDIR}/source_date_epoch folder
-# in a text file "__source_date_epoch.txt'. If this file is found by other recipe task, the value is exported in
-# the SOURCE_DATE_EPOCH variable in the task environment. This is done in an anonymous python function, 
-# so SOURCE_DATE_EPOCH is guaranteed to exist for all tasks the may use it (do_configure, do_compile, do_package, ...)
+# Once the value of SOURCE_DATE_EPOCH is determined, it is stored in the recipe's SDE_FILE.
+# If none of these mechanisms are suitable, replace the do_deploy_source_date_epoch task
+# with recipe-specific functionality to write the appropriate SOURCE_DATE_EPOCH into the SDE_FILE.
+#
+# If this file is found by other tasks, the value is exported in the SOURCE_DATE_EPOCH variable.
+# SOURCE_DATE_EPOCH is set for all tasks that might use it (do_configure, do_compile, do_package, ...)
 
 BUILD_REPRODUCIBLE_BINARIES ??= '1'
 inherit ${@oe.utils.ifelse(d.getVar('BUILD_REPRODUCIBLE_BINARIES') == '1', 'reproducible_build_simple', '')}
@@ -50,86 +52,100 @@
 addtask do_deploy_source_date_epoch_setscene
 addtask do_deploy_source_date_epoch before do_configure after do_patch
 
-def get_source_date_epoch_known_files(d, path):
-    source_date_epoch = 0
+def get_source_date_epoch_from_known_files(d, sourcedir):
+    source_date_epoch = None
+    newest_file = None
     known_files = set(["NEWS", "ChangeLog", "Changelog", "CHANGES"])
     for file in known_files:
-        filepath = os.path.join(path,file)
+        filepath = os.path.join(sourcedir, file)
         if os.path.isfile(filepath):
-            mtime = int(os.path.getmtime(filepath))
+            mtime = int(os.lstat(filepath).st_mtime)
             # There may be more than one "known_file" present; if so, use the youngest one
-            if mtime > source_date_epoch:
+            if not source_date_epoch or mtime > source_date_epoch:
                 source_date_epoch = mtime
+                newest_file = filepath
+    if newest_file:
+        bb.debug(1, "SOURCE_DATE_EPOCH taken from: %s" % newest_file)
     return source_date_epoch
 
-def find_git_folder(path):
-    exclude = set(["temp", "license-destdir", "patches", "recipe-sysroot-native", "recipe-sysroot", "pseudo", "build", "image", "sysroot-destdir"])
-    for root, dirs, files in os.walk(path, topdown=True):
+def find_git_folder(d, sourcedir):
+    # First guess: WORKDIR/git
+    # This is the default git fetcher unpack path
+    workdir = d.getVar('WORKDIR')
+    gitpath = os.path.join(workdir, "git/.git")
+    if os.path.isdir(gitpath):
+        return gitpath
+
+    # Second guess: ${S}
+    gitpath = os.path.join(sourcedir, ".git")
+    if os.path.isdir(gitpath):
+        return gitpath
+
+    # Perhaps there was a subpath or destsuffix specified.
+    # Go looking in the WORKDIR
+    exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
+                   "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
+    for root, dirs, files in os.walk(workdir, topdown=True):
         dirs[:] = [d for d in dirs if d not in exclude]
         if '.git' in dirs:
-            #bb.warn("found root:%s" % (str(root)))
             return root
-     
-def get_source_date_epoch_git(d, path):
-    source_date_epoch = 0
-    if "git://" in d.getVar('SRC_URI'):
-        gitpath = find_git_folder(d.getVar('WORKDIR'))
-        if gitpath != None:
-            import subprocess
-            if os.path.isdir(os.path.join(gitpath,".git")):
-                try:
-                    source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=path))
-                    #bb.warn("JB *** gitpath:%s sde: %d" % (gitpath,source_date_epoch))
-                    bb.debug(1, "git repo path:%s sde: %d" % (gitpath,source_date_epoch))
-                except subprocess.CalledProcessError as grepexc:
-                    #bb.warn( "Expected git repository not found, (path: %s) error:%d" % (gitpath, grepexc.returncode))
-                    bb.debug(1, "Expected git repository not found, (path: %s) error:%d" % (gitpath, grepexc.returncode))
-        else:
-            bb.warn("Failed to find a git repository for path:%s" % (path))
-    return source_date_epoch
-            
-python do_create_source_date_epoch_stamp() {
-    path = d.getVar('S')
-    if not os.path.isdir(path):
-        bb.warn("Unable to determine source_date_epoch! path:%s" % path)
-        return
 
+    bb.warn("Failed to find a git repository in WORKDIR: %s" % workdir)
+    return None
+
+def get_source_date_epoch_from_git(d, sourcedir):
+    source_date_epoch = None
+    if "git://" in d.getVar('SRC_URI'):
+        gitpath = find_git_folder(d, sourcedir)
+        if gitpath:
+            import subprocess
+            source_date_epoch = int(subprocess.check_output(['git','log','-1','--pretty=%ct'], cwd=gitpath))
+            bb.debug(1, "git repository: %s" % gitpath)
+    return source_date_epoch
+
+def get_source_date_epoch_from_youngest_file(d, sourcedir):
+    # Do it the hard way: check all files and find the youngest one...
+    source_date_epoch = None
+    newest_file = None
+    # Just in case S = WORKDIR
+    exclude = set(["build", "image", "license-destdir", "patches", "pseudo",
+                   "recipe-sysroot", "recipe-sysroot-native", "sysroot-destdir", "temp"])
+    for root, dirs, files in os.walk(sourcedir, topdown=True):
+        files = [f for f in files if not f[0] == '.']
+        dirs[:] = [d for d in dirs if d not in exclude]
+
+        for fname in files:
+            filename = os.path.join(root, fname)
+            try:
+                mtime = int(os.lstat(filename).st_mtime)
+            except ValueError:
+                mtime = 0
+            if not source_date_epoch or mtime > source_date_epoch:
+                source_date_epoch = mtime
+                newest_file = filename
+
+    if newest_file:
+        bb.debug(1, "Newest file found: %s" % newest_file)
+    return source_date_epoch
+
+python do_create_source_date_epoch_stamp() {
     epochfile = d.getVar('SDE_FILE')
     if os.path.isfile(epochfile):
-        bb.debug(1, " path: %s reusing __source_date_epoch.txt" % epochfile)
+        bb.debug(1, "Reusing SOURCE_DATE_EPOCH from: %s" % epochfile)
         return
- 
-    # Try to detect/find a git repository
-    source_date_epoch = get_source_date_epoch_git(d, path)
+
+    sourcedir = d.getVar('S')
+    source_date_epoch = (
+        get_source_date_epoch_from_git(d, sourcedir) or
+        get_source_date_epoch_from_known_files(d, sourcedir) or
+        get_source_date_epoch_from_youngest_file(d, sourcedir) or
+        0       # Last resort
+    )
     if source_date_epoch == 0:
-        source_date_epoch = get_source_date_epoch_known_files(d, path)
-    if source_date_epoch == 0:
-        # Do it the hard way: check all files and find the youngest one...
-        filename_dbg = None
-        exclude = set(["temp", "license-destdir", "patches", "recipe-sysroot-native", "recipe-sysroot", "pseudo", "build", "image", "sysroot-destdir"])
-        for root, dirs, files in os.walk(path, topdown=True):
-            files = [f for f in files if not f[0] == '.']
-            dirs[:] = [d for d in dirs if d not in exclude]
+        # empty folder, not a single file ...
+        bb.debug(1, "No files found to determine SOURCE_DATE_EPOCH")
 
-            for fname in files:
-                filename = os.path.join(root, fname)
-                try:
-                    mtime = int(os.path.getmtime(filename))
-                except ValueError:
-                    mtime = 0
-                if mtime > source_date_epoch:
-                    source_date_epoch = mtime
-                    filename_dbg = filename
-
-        if filename_dbg != None:
-            bb.debug(1," SOURCE_DATE_EPOCH %d derived from: %s" % (source_date_epoch, filename_dbg))
-
-        if source_date_epoch == 0:
-            # empty folder, not a single file ...
-            # kernel source do_unpack is special cased
-            if not bb.data.inherits_class('kernel', d):
-                bb.debug(1, "Unable to determine source_date_epoch! path:%s" % path)
-
+    bb.debug(1, "SOURCE_DATE_EPOCH: %d" % source_date_epoch)
     bb.utils.mkdirhier(d.getVar('SDE_DIR'))
     with open(epochfile, 'w') as f:
         f.write(str(source_date_epoch))
@@ -145,6 +161,6 @@
         if os.path.isfile(epochfile):
             with open(epochfile, 'r') as f:
                 source_date_epoch = f.read()
-            bb.debug(1, "source_date_epoch stamp found ---> stamp %s" % source_date_epoch)
+            bb.debug(1, "SOURCE_DATE_EPOCH: %s" % source_date_epoch)
         d.setVar('SOURCE_DATE_EPOCH', source_date_epoch)
 }
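
As the rewritten header notes, a recipe whose sources defeat all four heuristics can write its own epoch. A hypothetical recipe-side override of the stamp task defined above, using the SDE_DIR/SDE_FILE variables from this class (the pinned timestamp is illustrative):

    python do_create_source_date_epoch_stamp() {
        bb.utils.mkdirhier(d.getVar('SDE_DIR'))
        with open(d.getVar('SDE_FILE'), 'w') as f:
            f.write('1546300800')  # a fixed, recipe-chosen SOURCE_DATE_EPOCH
    }
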
diff --git a/poky/meta/classes/rm_work.bbclass b/poky/meta/classes/rm_work.bbclass
index 31d99e4..10e134b 100644
--- a/poky/meta/classes/rm_work.bbclass
+++ b/poky/meta/classes/rm_work.bbclass
@@ -47,59 +47,60 @@
     cd `dirname ${STAMP}`
     for i in `basename ${STAMP}`*
     do
-        for j in ${SSTATETASKS} do_shared_workdir
-        do
-            case $i in
-            *do_setscene*)
-                break
-                ;;
-            *sigdata*|*sigbasedata*)
-                i=dummy
-                break
-                ;;
-            *do_package_write*)
-                i=dummy
-                break
-                ;;
-            *do_image_complete*)
-                mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
-                i=dummy
-                break
-                ;;
-            *do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*)
-                i=dummy
-                break
-                ;;
-            *do_build*)
-                i=dummy
-                break
-                ;;
-            *do_addto_recipe_sysroot*)
-                # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
-                excludes="$excludes recipe-sysroot-native"
-                i=dummy
-                break
-                ;;
+        # By default we'll delete the stamp, unless $i is changed by the inner loop
+        # (i=dummy does this)
+
+        case $i in
+        *sigdata*|*sigbasedata*)
+            # Save/skip anything that looks like a signature data file.
+            i=dummy
+            ;;
+        *do_image_complete_setscene*)
+            # Ensure we don't 'stack' setscene extensions to this stamp with the section below
+            i=dummy
+            ;;
+        *do_image_complete*)
+            # Promote do_image_complete stamps to setscene versions (ahead of *do_image* below)
+            mv $i `echo $i | sed -e "s#do_image_complete#do_image_complete_setscene#"`
+            i=dummy
+            ;;
+        *do_package_write*|*do_rootfs*|*do_image*|*do_bootimg*|*do_write_qemuboot_conf*|*do_build*)
+            i=dummy
+            ;;
+        *do_addto_recipe_sysroot*)
+            # Preserve recipe-sysroot-native if do_addto_recipe_sysroot has been used
+            excludes="$excludes recipe-sysroot-native"
+            i=dummy
+            ;;
+        *do_package|*do_package.*|*do_package_setscene.*)
             # We remove do_package entirely, including any
             # sstate version since otherwise we'd need to leave 'plaindirs' around
             # such as 'packages' and 'packages-split' and these can be large. No end
             # of chain tasks depend directly on do_package anymore.
-            *do_package|*do_package.*|*do_package_setscene.*)
-                rm -f $i;
-                i=dummy
-                break
-                ;;
-            *_setscene*)
-                i=dummy
+            rm -f $i;
+            i=dummy
+            ;;
+        *_setscene*)
+            # Skip stamps which are already setscene versions
+            i=dummy
+            ;;
+        esac
+
+        for j in ${SSTATETASKS} do_shared_workdir
+        do
+            case $i in
+            dummy)
                 break
                 ;;
             *$j|*$j.*)
+                # Promote the stamp to a setscene version
                 mv $i `echo $i | sed -e "s#${j}#${j}_setscene#"`
                 i=dummy
                 break
-            ;;
+                ;;
             esac
         done
+
         rm -f $i
     done
 
diff --git a/poky/meta/classes/rootfs-postcommands.bbclass b/poky/meta/classes/rootfs-postcommands.bbclass
index 221869e..bde58ad 100644
--- a/poky/meta/classes/rootfs-postcommands.bbclass
+++ b/poky/meta/classes/rootfs-postcommands.bbclass
@@ -2,9 +2,12 @@
 # Zap the root password if debug-tweaks feature is not enabled
 ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
 
-# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
+# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled
 ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
 
+# Allow dropbear/openssh to accept root logins if debug-tweaks or allow-root-login is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-root-login' ], "ssh_allow_root_login; ", "",d)}'
+
 # Enable postinst logging if debug-tweaks is enabled
 ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
 
@@ -36,11 +39,6 @@
 
 ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
 
-# Disable DNS lookups, the SSH_DISABLE_DNS_LOOKUP can be overridden to allow
-# distros to choose not to take this change
-SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
-ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
-
 # Sort the user and group entries in /etc by ID in order to make the content
 # deterministic. Package installs are not deterministic, causing the ordering
 # of entries to change between builds. In case that this isn't desired,
@@ -143,12 +141,11 @@
 }
 
 #
-# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
+# allow dropbear/openssh to accept logins from accounts with an empty password string
 #
 ssh_allow_empty_password () {
 	for config in sshd_config sshd_config_readonly; do
 		if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
-			sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
 			sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
 		fi
 	done
@@ -171,9 +168,20 @@
 	fi
 }
 
-ssh_disable_dns_lookup () {
-	if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
-		sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
+#
+# allow dropbear/openssh to accept root logins
+#
+ssh_allow_root_login () {
+	for config in sshd_config sshd_config_readonly; do
+		if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
+			sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
+		fi
+	done
+
+	if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
+		if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
+			sed -i '/^DROPBEAR_EXTRA_ARGS=/ s/-w//' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+		fi
 	fi
 }
 
@@ -245,7 +253,6 @@
     pkgs = image_list_installed_packages(d)
     with open(manifest_name, 'w+') as image_manifest:
         image_manifest.write(format_pkg_list(pkgs, "ver"))
-        image_manifest.write("\n")
 
     if os.path.exists(manifest_name):
         manifest_link = deploy_dir + "/" + link_name + ".manifest"
@@ -308,17 +315,18 @@
 python write_image_test_data() {
     from oe.data import export2json
 
-    testdata = "%s/%s.testdata.json" % (d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('IMAGE_NAME'))
-    testdata_link = "%s/%s.testdata.json" % (d.getVar('DEPLOY_DIR_IMAGE'), d.getVar('IMAGE_LINK_NAME'))
+    deploy_dir = d.getVar('IMGDEPLOYDIR')
+    link_name = d.getVar('IMAGE_LINK_NAME')
+    testdata_name = os.path.join(deploy_dir, "%s.testdata.json" % d.getVar('IMAGE_NAME'))
 
-    bb.utils.mkdirhier(os.path.dirname(testdata))
     searchString = "%s/"%(d.getVar("TOPDIR")).replace("//","/")
-    export2json(d, testdata,searchString=searchString,replaceString="")
+    export2json(d, testdata_name, searchString=searchString, replaceString="")
 
-    if testdata_link != testdata:
+    if os.path.exists(testdata_name):
+        testdata_link = os.path.join(deploy_dir, "%s.testdata.json" % link_name)
         if os.path.lexists(testdata_link):
-           os.remove(testdata_link)
-        os.symlink(os.path.basename(testdata), testdata_link)
+            os.remove(testdata_link)
+        os.symlink(os.path.basename(testdata_name), testdata_link)
 }
 write_image_test_data[vardepsexclude] += "TOPDIR"
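
With root login split out of ssh_allow_empty_password (above), images can opt into each relaxation independently instead of enabling all of debug-tweaks; a hypothetical local.conf fragment:

    IMAGE_FEATURES += "allow-root-login"
    # empty passwords remain a separate opt-in:
    IMAGE_FEATURES += "allow-empty-password"
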
 
diff --git a/poky/meta/classes/rootfs_deb.bbclass b/poky/meta/classes/rootfs_deb.bbclass
index 9ee1dfc..2b93796 100644
--- a/poky/meta/classes/rootfs_deb.bbclass
+++ b/poky/meta/classes/rootfs_deb.bbclass
@@ -6,7 +6,7 @@
 
 do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
 do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
-do_rootfs[recrdeptask] += "do_package_write_deb"
+do_rootfs[recrdeptask] += "do_package_write_deb do_package_qa"
 do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
 
 do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
diff --git a/poky/meta/classes/rootfs_ipk.bbclass b/poky/meta/classes/rootfs_ipk.bbclass
index 52b468d..aabc370 100644
--- a/poky/meta/classes/rootfs_ipk.bbclass
+++ b/poky/meta/classes/rootfs_ipk.bbclass
@@ -10,7 +10,7 @@
 
 do_rootfs[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
 do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
-do_rootfs[recrdeptask] += "do_package_write_ipk"
+do_rootfs[recrdeptask] += "do_package_write_ipk do_package_qa"
 do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
 
 do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
diff --git a/poky/meta/classes/rootfs_rpm.bbclass b/poky/meta/classes/rootfs_rpm.bbclass
index 7f305f5..51f89ea 100644
--- a/poky/meta/classes/rootfs_rpm.bbclass
+++ b/poky/meta/classes/rootfs_rpm.bbclass
@@ -23,7 +23,7 @@
 do_rootfs[depends] += "${RPMROOTFSDEPENDS}"
 do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
 
-do_rootfs[recrdeptask] += "do_package_write_rpm"
+do_rootfs[recrdeptask] += "do_package_write_rpm do_package_qa"
 do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
 
 python () {
diff --git a/poky/meta/classes/setuptools.bbclass b/poky/meta/classes/setuptools.bbclass
index 56343b1..a923ea3 100644
--- a/poky/meta/classes/setuptools.bbclass
+++ b/poky/meta/classes/setuptools.bbclass
@@ -1,8 +1,3 @@
 inherit distutils
 
-DEPENDS += "python-distribute-native"
-
-DISTUTILS_INSTALL_ARGS = "--root=${D} \
-    --prefix=${prefix} \
-    --install-lib=${PYTHON_SITEPACKAGES_DIR} \
-    --install-data=${datadir}"
+DEPENDS += "python-setuptools-native"
diff --git a/poky/meta/classes/setuptools3.bbclass b/poky/meta/classes/setuptools3.bbclass
index de6dd94..8ca66ee 100644
--- a/poky/meta/classes/setuptools3.bbclass
+++ b/poky/meta/classes/setuptools3.bbclass
@@ -2,7 +2,3 @@
 
 DEPENDS += "python3-setuptools-native"
 
-DISTUTILS_INSTALL_ARGS = "--root=${D} \
-    --prefix=${prefix} \
-    --install-lib=${PYTHON_SITEPACKAGES_DIR} \
-    --install-data=${datadir}"
diff --git a/poky/meta/classes/siteinfo.bbclass b/poky/meta/classes/siteinfo.bbclass
index 86bb853..411e704 100644
--- a/poky/meta/classes/siteinfo.bbclass
+++ b/poky/meta/classes/siteinfo.bbclass
@@ -15,11 +15,13 @@
 # It is an error for the target not to exist.
 # If 'what' doesn't exist then an empty value is returned
 #
-def siteinfo_data(d):
+def siteinfo_data_for_machine(arch, os, d):
     archinfo = {
         "allarch": "endian-little bit-32", # bogus, but better than special-casing the checks below for allarch
         "aarch64": "endian-little bit-64 arm-common arm-64",
         "aarch64_be": "endian-big bit-64 arm-common arm-64",
+        "arc": "endian-little bit-32 arc-common",
+        "arceb": "endian-big bit-32 arc-common",
         "arm": "endian-little bit-32 arm-common arm-32",
         "armeb": "endian-big bit-32 arm-common arm-32",
         "avr32": "endian-big bit-32 avr32-common",
@@ -30,6 +32,8 @@
         "i586": "endian-little bit-32 ix86-common",
         "i686": "endian-little bit-32 ix86-common",
         "ia64": "endian-little bit-64",
+        "lm32": "endian-big bit-32",
+        "m68k": "endian-big bit-32",
         "microblaze": "endian-big bit-32 microblaze-common",
         "microblazeeb": "endian-big bit-32 microblaze-common",
         "microblazeel": "endian-little bit-32 microblaze-common",
@@ -128,15 +132,13 @@
         locs = { "archinfo" : archinfo, "osinfo" : osinfo, "targetinfo" : targetinfo, "d" : d}
         archinfo, osinfo, targetinfo = bb.utils.better_eval(call, locs)
 
-    hostarch = d.getVar("HOST_ARCH")
-    hostos = d.getVar("HOST_OS")
-    target = "%s-%s" % (hostarch, hostos)
+    target = "%s-%s" % (arch, os)
 
     sitedata = []
-    if hostarch in archinfo:
-        sitedata.extend(archinfo[hostarch].split())
-    if hostos in osinfo:
-        sitedata.extend(osinfo[hostos].split())
+    if arch in archinfo:
+        sitedata.extend(archinfo[arch].split())
+    if os in osinfo:
+        sitedata.extend(osinfo[os].split())
     if target in targetinfo:
         sitedata.extend(targetinfo[target].split())
     sitedata.append(target)
@@ -145,6 +147,9 @@
     bb.debug(1, "SITE files %s" % sitedata);
     return sitedata
 
+def siteinfo_data(d):
+    return siteinfo_data_for_machine(d.getVar("HOST_ARCH"), d.getVar("HOST_OS"), d)
+
 python () {
     sitedata = set(siteinfo_data(d))
     if "endian-little" in sitedata:
diff --git a/poky/meta/classes/spdx.bbclass b/poky/meta/classes/spdx.bbclass
index c5f544d..fb78e27 100644
--- a/poky/meta/classes/spdx.bbclass
+++ b/poky/meta/classes/spdx.bbclass
@@ -202,18 +202,13 @@
     return
 
 def hash_file(file_name):
-    try:
-        with open(file_name, 'rb') as f:
-            data_string = f.read()
-            sha1 = hash_string(data_string)
-            return sha1
-    except:
-        return None
+    from bb.utils import sha1_file
+    return sha1_file(file_name)
 
 def hash_string(data):
     import hashlib
     sha1 = hashlib.sha1()
-    sha1.update(data)
+    sha1.update(data.encode('utf-8'))
     return sha1.hexdigest()
 
 def run_fossology(foss_command, full_spdx):
@@ -226,7 +221,7 @@
     except subprocess.CalledProcessError as e:
         return None
 
-    foss_output = string.replace(foss_output, '\r', '')
+    foss_output = foss_output.replace('\r', '')
 
     # Package info
     package_info = {}
@@ -289,7 +284,8 @@
 def get_ver_code(dirname):
     chksums = []
     for f_dir, f in list_files(dirname):
-        hash = hash_file(os.path.join(dirname, f_dir, f))
+        path = os.path.join(dirname, f_dir, f)
+        hash = hash_file(path)
         if not hash is None:
             chksums.append(hash)
         else:
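
hash_string() now assumes str input and handles the encoding itself; a quick sanity check of the rewritten helper (the digest shown is the well-known SHA-1 of "hello"):

    import hashlib

    def hash_string(data):
        sha1 = hashlib.sha1()
        sha1.update(data.encode('utf-8'))
        return sha1.hexdigest()

    assert hash_string("hello") == "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d"
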
diff --git a/poky/meta/classes/sstate.bbclass b/poky/meta/classes/sstate.bbclass
index 0b28850..efb0096 100644
--- a/poky/meta/classes/sstate.bbclass
+++ b/poky/meta/classes/sstate.bbclass
@@ -25,14 +25,21 @@
 SSTATE_EXTRAPATH[vardepvalue] = ""
 
 # For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
-SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/noarch/"
+SSTATE_DUPWHITELIST = "${DEPLOY_DIR}/licenses/"
 # Avoid docbook/sgml catalog warnings for now
 SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
 # sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
 SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
 SSTATE_DUPWHITELIST += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
+# target-sdk-provides-dummy files overlap because allarch is disabled when multilib is used
+SSTATE_DUPWHITELIST += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
 # Archive the sources for many architectures in one deploy folder
 SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
+# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/ovmf"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/grub-efi"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/systemd-boot"
+SSTATE_DUPWHITELIST += "${DEPLOY_DIR_IMAGE}/microcode"
 
 SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
 SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
@@ -59,7 +66,6 @@
 SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
 SSTATEPOSTINSTFUNCS = ""
 EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
-SSTATECLEANFUNCS = ""
 
 # Check whether sstate exists for tasks that support sstate and are in the
 # locked signatures file.
@@ -301,7 +307,7 @@
     sstatepkg = d.getVar('SSTATE_PKG') + '_' + ss['task'] + ".tgz"
 
     if not os.path.exists(sstatepkg):
-        pstaging_fetch(sstatefetch, sstatepkg, d)
+        pstaging_fetch(sstatefetch, d)
 
     if not os.path.isfile(sstatepkg):
         bb.note("Staging package %s does not exist" % sstatepkg)
@@ -408,7 +414,7 @@
         bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
         subprocess.check_call(sstate_hardcode_cmd, shell=True)
 
-        # Need to remove this or we'd copy it into the target directory and may 
+        # Need to remove this or we'd copy it into the target directory and may
         # conflict with another writer
         os.remove(fixmefn)
 }
@@ -507,10 +513,6 @@
                 stfile.endswith(rm_nohash):
             oe.path.remove(stfile)
 
-    # Removes the users/groups created by the package
-    for cleanfunc in (d.getVar('SSTATECLEANFUNCS') or '').split():
-        bb.build.exec_func(cleanfunc, d)
-
 sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
 
 CLEANFUNCS += "sstate_cleanall"
@@ -640,7 +642,7 @@
 
     return
 
-def pstaging_fetch(sstatefetch, sstatepkg, d):
+def pstaging_fetch(sstatefetch, d):
     import bb.fetch2
 
     # Only try and fetch if the user has configured a mirror
@@ -720,17 +722,24 @@
 #
 sstate_create_package () {
 	TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
+
+	# Use pigz if available
+	OPT="-czS"
+	if [ -x "$(command -v pigz)" ]; then
+		OPT="-I pigz -cS"
+	fi
+
 	# Need to handle empty directories
 	if [ "$(ls -A)" ]; then
 		set +e
-		tar -czf $TFILE *
+		tar $OPT -f $TFILE *
 		ret=$?
 		if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
 			exit 1
 		fi
 		set -e
 	else
-		tar -cz --file=$TFILE --files-from=/dev/null
+		tar $OPT --file=$TFILE --files-from=/dev/null
 	fi
 	chmod 0664 $TFILE
 	mv -f $TFILE ${SSTATE_PKG}
@@ -890,6 +899,18 @@
             evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
         bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
 
+    # Print some summary statistics about the current task completion and how much sstate
+    # reuse there was. Avoid divide by zero errors.
+    total = len(sq_fn)
+    currentcount = d.getVar("BB_SETSCENE_STAMPCURRENT_COUNT") or 0
+    complete = 0
+    if currentcount:
+        complete = (len(ret) + currentcount) / (total + currentcount) * 100
+    match = 0
+    if total:
+        match = len(ret) / total * 100
+    bb.plain("Sstate summary: Wanted %d Found %d Missed %d Current %d (%d%% match, %d%% complete)" % (total, len(ret), len(missed), currentcount, match, complete))
+
     if hasattr(bb.parse.siggen, "checkhashes"):
         bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)
 
@@ -902,6 +923,7 @@
     # task is included in taskdependees too
     # Return - False - We need this dependency
     #        - True - We can skip this dependency
+    import re
 
     def logit(msg, log):
         if log is not None:
@@ -956,11 +978,24 @@
 
         # Consider sysroot depending on sysroot tasks
         if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
-            # base-passwd/shadow-sysroot don't need their dependencies
-            if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
-                continue
-            # Nothing need depend on libc-initial/gcc-cross-initial
-            if "-initial" in taskdependees[task][0]:
+            # Allow excluding certain recursive dependencies. If a recipe needs one of these,
+            # it should add the specific dependency itself, rather than relying on one of its
+            # dependees to pull it in.
+            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
+            not_needed = False
+            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
+            if excludedeps is None:
+                # Cache the regular expressions for speed
+                excludedeps = []
+                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
+                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
+                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
+            for excl in excludedeps:
+                if excl[0].match(taskdependees[dep][0]):
+                    if excl[1].match(taskdependees[task][0]):
+                        not_needed = True
+                        break
+            if not_needed:
                 continue
             # For meta-extsdk-toolchain we want all sysroot dependencies
             if taskdependees[dep][0] == 'meta-extsdk-toolchain':
@@ -1007,7 +1042,7 @@
         bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz" ".siginfo", d)
 }
 
-SSTATE_PRUNE_OBSOLETEWORKDIR = "1"
+SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
 
 # Event handler which removes manifests and stamps file for
 # recipes which are no longer reachable in a build where they
@@ -1027,6 +1062,16 @@
         with open(preservestampfile, 'r') as f:
             preservestamps = f.readlines()
     seen = []
+
+    # The machine index contains all the stamps this machine has ever seen in this build directory.
+    # We should only remove things which this machine once accessed but no longer does.
+    machineindex = set()
+    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
+    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
+    if os.path.exists(mi):
+        with open(mi, "r") as f:
+            machineindex = set(line.strip() for line in f.readlines())
+
     for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
         toremove = []
         i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
@@ -1036,7 +1081,7 @@
             lines = f.readlines()
             for l in lines:
                 (stamp, manifest, workdir) = l.split()
-                if stamp not in stamps and stamp not in preservestamps:
+                if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                     toremove.append(l)
                     if stamp not in seen:
                         bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
@@ -1065,6 +1110,11 @@
         with open(i, "w") as f:
             for l in lines:
                 f.write(l)
+    machineindex |= set(stamps)
+    with open(mi, "w") as f:
+        for l in machineindex:
+            f.write(l + "\n")
+
     if preservestamps:
         os.remove(preservestampfile)
 }
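
setscene_depvalid() above compiles each SSTATE_EXCLUDEDEPS_SYSROOT entry into a pair of regular expressions, '<dependee-regex>-><dependency-regex>'; when both sides match, the recursive sysroot dependency is dropped and a recipe that really needs it must depend on it explicitly. A hypothetical setting (recipe name illustrative):

    SSTATE_EXCLUDEDEPS_SYSROOT += ".*->my-headers-native"
    # nothing receives my-headers-native through another recipe's sysroot;
    # consumers must add DEPENDS += "my-headers-native" themselves
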
diff --git a/poky/meta/classes/staging.bbclass b/poky/meta/classes/staging.bbclass
index 939042e..84e13ba 100644
--- a/poky/meta/classes/staging.bbclass
+++ b/poky/meta/classes/staging.bbclass
@@ -70,7 +70,7 @@
 python sysroot_strip () {
     inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
     if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
-        return 0
+        return
 
     dstdir = d.getVar('SYSROOT_DESTDIR')
     pn = d.getVar('PN')
@@ -79,7 +79,7 @@
     qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn) or "").split()
     strip_cmd = d.getVar("STRIP")
 
-    oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir,
+    oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
                            qa_already_stripped=qa_already_stripped)
 }
 
@@ -256,7 +256,7 @@
     workdir = d.getVar("WORKDIR")
     #bb.warn(str(taskdepdata))
     pn = d.getVar("PN")
-
+    mc = d.getVar("BB_CURRENT_MC")
     stagingdir = d.getVar("STAGING_DIR")
     sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
     recipesysroot = d.getVar("RECIPE_SYSROOT")
@@ -294,12 +294,14 @@
     start = set([start])
 
     sstatetasks = d.getVar("SSTATETASKS").split()
+    # Add recipe specific tasks referenced by setscene_depvalid()
+    sstatetasks.append("do_stash_locale")
 
     def print_dep_tree(deptree):
         data = ""
         for dep in deptree:
             deps = "    " + "\n    ".join(deptree[dep][3]) + "\n"
-            data = "%s:\n  %s\n  %s\n%s  %s\n  %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
+            data = data + "%s:\n  %s\n  %s\n%s  %s\n  %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
         return data
 
     #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))
@@ -443,7 +445,13 @@
 
     msg_exists = []
     msg_adding = []
+
     for dep in configuredeps:
+        if mc != 'default':
+            # We should not care about other multiconfigs
+            depmc = dep.split(':')[1]
+            if depmc != mc:
+                continue
         c = setscenedeps[dep][0]
         if c not in installed:
             continue
diff --git a/poky/meta/classes/systemd.bbclass b/poky/meta/classes/systemd.bbclass
index 1b13432..c7b784d 100644
--- a/poky/meta/classes/systemd.bbclass
+++ b/poky/meta/classes/systemd.bbclass
@@ -34,10 +34,10 @@
 		systemctl daemon-reload
 	fi
 
-	systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE}
+	systemctl $OPTS ${SYSTEMD_AUTO_ENABLE} ${SYSTEMD_SERVICE_ESCAPED}
 
 	if [ -z "$D" -a "${SYSTEMD_AUTO_ENABLE}" = "enable" ]; then
-		systemctl --no-block restart ${SYSTEMD_SERVICE}
+		systemctl --no-block restart ${SYSTEMD_SERVICE_ESCAPED}
 	fi
 fi
 }
@@ -51,10 +51,10 @@
 
 if type systemctl >/dev/null 2>/dev/null; then
 	if [ -z "$D" ]; then
-		systemctl stop ${SYSTEMD_SERVICE}
+		systemctl stop ${SYSTEMD_SERVICE_ESCAPED}
 	fi
 
-	systemctl $OPTS disable ${SYSTEMD_SERVICE}
+	systemctl $OPTS disable ${SYSTEMD_SERVICE_ESCAPED}
 fi
 }
 
@@ -65,6 +65,7 @@
 
 python systemd_populate_packages() {
     import re
+    import shlex
 
     if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
         return
@@ -85,6 +86,9 @@
     def systemd_generate_package_scripts(pkg):
         bb.debug(1, 'adding systemd calls to postinst/postrm for %s' % pkg)
 
+        paths_escaped = ' '.join(shlex.quote(s) for s in d.getVar('SYSTEMD_SERVICE_' + pkg, True).split())
+        d.setVar('SYSTEMD_SERVICE_ESCAPED_' + pkg, paths_escaped)
+
         # Add pkg to the overrides so that it finds the SYSTEMD_SERVICE_pkg
         # variable.
         localdata = d.createCopy()
@@ -130,7 +134,7 @@
                 systemd_add_files_and_parse(pkg_systemd, path, service_base + '@.service', keys)
             for key in keys.split():
                 # recurse all dependencies found in keys ('Also';'Conflicts';..) and add to files
-                cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, fullpath, key)
+                cmd = "grep %s %s | sed 's,%s=,,g' | tr ',' '\\n'" % (key, shlex.quote(fullpath), key)
                 pipe = os.popen(cmd, 'r')
                 line = pipe.readline()
                 while line:
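
The new SYSTEMD_SERVICE_ESCAPED value is simply SYSTEMD_SERVICE with each unit
name passed through shlex.quote(), so names containing shell metacharacters
survive word-splitting in the generated postinst/prerm. For reference, how the
quoting behaves (unit names illustrative):

    import shlex

    print(shlex.quote("getty@tty1.service"))   # unchanged: @ . - are safe chars
    print(shlex.quote("app@a$b.service"))      # 'app@a$b.service' ($ forces quoting)
    escaped = ' '.join(shlex.quote(s)
                       for s in "a.service app@a$b.service".split())
    print(escaped)                             # a.service 'app@a$b.service'
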
diff --git a/poky/meta/classes/terminal.bbclass b/poky/meta/classes/terminal.bbclass
index a27e10c..73e765d 100644
--- a/poky/meta/classes/terminal.bbclass
+++ b/poky/meta/classes/terminal.bbclass
@@ -25,7 +25,8 @@
     bb.utils.mkdirhier(os.path.dirname(runfile))
 
     with open(runfile, 'w') as script:
-        script.write('#!/bin/sh -e\n')
+        script.write('#!/usr/bin/env %s\n' % d.getVar('SHELL'))
+        script.write('set -e\n')
         bb.data.emit_func(cmd_func, script, envdata)
         script.write(cmd_func)
         script.write("\n")
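
The -e option moves out of the shebang here because a shebang such as
"#!/usr/bin/env bash -e" does not work on Linux: the kernel hands "bash -e" to
env as a single argument. Writing "set -e" into the body is the portable
equivalent, and deferring to ${SHELL} via env honours the user's configured
shell. A standalone sketch of the resulting script generation (path and body
hypothetical):

    import os, stat

    def write_runfile(runfile, shell, body):
        with open(runfile, 'w') as script:
            script.write('#!/usr/bin/env %s\n' % shell)  # honour configured shell
            script.write('set -e\n')                     # stop at first failure
            script.write(body + '\n')
        os.chmod(runfile, os.stat(runfile).st_mode | stat.S_IEXEC)

    write_runfile('/tmp/run.do_terminal', 'bash', 'echo terminal ready')
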
diff --git a/poky/meta/classes/testexport.bbclass b/poky/meta/classes/testexport.bbclass
index d070f07..59cbaef 100644
--- a/poky/meta/classes/testexport.bbclass
+++ b/poky/meta/classes/testexport.bbclass
@@ -131,6 +131,11 @@
     shutil.copy2(image_manifest, os.path.join(test_data_path, 'manifest'))
     shutil.copy2(tdname, os.path.join(test_data_path, 'testdata.json'))
 
+    for subdir, dirs, files in os.walk(export_path):
+        for dir in dirs:
+            if dir == '__pycache__':
+                shutil.rmtree(os.path.join(subdir, dir))
+
     # Create tar file for common parts of testexport
     create_tarball(d, "testexport.tar.gz", d.getVar("TEST_EXPORT_DIR"))
 
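The walk above drops Python bytecode caches from the export tree before it is
tarred up. os.walk() tolerates a directory vanishing mid-walk (listing errors go
to the default onerror handler, which ignores them), though pruning the dirs
list as well is slightly tidier; a sketch of that variant:

    import os, shutil

    def prune_pycache(root):
        for subdir, dirs, files in os.walk(root):
            for d in list(dirs):
                if d == '__pycache__':
                    shutil.rmtree(os.path.join(subdir, d))
                    dirs.remove(d)   # don't descend into the removed tree
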
diff --git a/poky/meta/classes/testimage-auto.bbclass b/poky/meta/classes/testimage-auto.bbclass
deleted file mode 100644
index e0a22b7..0000000
--- a/poky/meta/classes/testimage-auto.bbclass
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2013 Intel Corporation
-#
-# Released under the MIT license (see COPYING.MIT)
-
-
-# Run tests automatically on an image after the image is constructed
-# (as opposed to testimage.bbclass alone where tests must be called
-# manually using bitbake -c testimage <image>).
-#
-# NOTE: to use this class, simply set TEST_IMAGE = "1" - no need to
-# inherit it since that will be done in image.bbclass when this variable
-# has been set.
-#
-# See testimage.bbclass for the test implementation.
-
-inherit testimage
-
-python do_testimage_auto() {
-    testimage_main(d)
-}
-addtask testimage_auto before do_build after do_image_complete
-do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}"
-do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}"
diff --git a/poky/meta/classes/testimage.bbclass b/poky/meta/classes/testimage.bbclass
index a9e8b49..f2ff91d 100644
--- a/poky/meta/classes/testimage.bbclass
+++ b/poky/meta/classes/testimage.bbclass
@@ -10,6 +10,11 @@
 # - first add IMAGE_CLASSES += "testimage" in local.conf
 # - build a qemu core-image-sato
 # - then bitbake core-image-sato -c testimage. That will run a standard suite of tests.
+#
+# The tests can be run automatically each time an image is built if you set
+# TESTIMAGE_AUTO = "1"
+
+TESTIMAGE_AUTO ??= "0"
 
 # You can set (or append to) TEST_SUITES in local.conf to select the tests
 # which you want to run for your target.
@@ -35,25 +40,30 @@
 TEST_EXTRACTED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/extracted"
 TEST_PACKAGED_DIR ?= "${TEST_NEEDED_PACKAGES_DIR}/packaged"
 
-RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf rpm', '', d)}"
+PKGMANTESTSUITE = "\
+    ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf rpm', '', d)} \
+    ${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg', '', d)} \
+    ${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt', '', d)} \
+    "
 SYSTEMDSUITE = "${@bb.utils.filter('DISTRO_FEATURES', 'systemd', d)}"
 MINTESTSUITE = "ping"
 NETTESTSUITE = "${MINTESTSUITE} ssh df date scp oe_syslog ${SYSTEMDSUITE}"
 DEVTESTSUITE = "gcc kernelmodule ldd"
+PTESTTESTSUITE = "${MINTESTSUITE} ssh scp ptest"
 
 DEFAULT_TEST_SUITES = "${MINTESTSUITE} auto"
 DEFAULT_TEST_SUITES_pn-core-image-minimal = "${MINTESTSUITE}"
 DEFAULT_TEST_SUITES_pn-core-image-minimal-dev = "${MINTESTSUITE}"
 DEFAULT_TEST_SUITES_pn-core-image-full-cmdline = "${NETTESTSUITE} perl python logrotate ptest"
 DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE} ptest"
-DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
+DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${PKGMANTESTSUITE} ptest"
+DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${PKGMANTESTSUITE} \
     ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)} ptest gi"
 DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} buildcpio buildlzip buildgalculator \
-    connman ${DEVTESTSUITE} logrotate perl parselogs python ${RPMTESTSUITE} xorg ptest gi stap"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE} ptest gi"
+    connman ${DEVTESTSUITE} logrotate perl parselogs python ${PKGMANTESTSUITE} xorg ptest gi stap"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${PKGMANTESTSUITE} ptest gi"
 DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcpio buildlzip buildgalculator \
-    connman ${DEVTESTSUITE} logrotate pam parselogs perl python ${RPMTESTSUITE} ptest gi stap"
+    connman ${DEVTESTSUITE} logrotate pam parselogs perl python ${PKGMANTESTSUITE} ptest gi stap"
 DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
 
 # aarch64 has no graphics
@@ -73,13 +83,13 @@
 TEST_TARGET ?= "qemu"
 
 TESTIMAGEDEPENDS = ""
-TESTIMAGEDEPENDS_qemuall = "qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
+TESTIMAGEDEPENDS_append_qemuall = " qemu-native:do_populate_sysroot qemu-helper-native:do_populate_sysroot qemu-helper-native:do_addto_recipe_sysroot"
 TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS_qemuall += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS_append_qemuall = " ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'cpio-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS_append_qemuall = " ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
 TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'dnf-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot', '', d)}"
-TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'ipk', 'opkg-utils-native:do_populate_sysroot package-index:do_package_index', '', d)}"
+TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'deb', 'apt-native:do_populate_sysroot  package-index:do_package_index', '', d)}"
 TESTIMAGEDEPENDS += "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'createrepo-c-native:do_populate_sysroot', '', d)}"
 
 TESTIMAGELOCK = "${TMPDIR}/testimage.lock"
@@ -141,7 +151,7 @@
                     'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
                     'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
                     'STARTTIME': d.getVar("DATETIME"),
-                    'HOST_DISTRO': ('-'.join(platform.linux_distribution())).replace(' ', '-'),
+                    'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
                     'LAYERS': get_layers(d.getVar("BBLAYERS"))}
     return configuration
 get_testimage_configuration[vardepsexclude] = "DATETIME"
@@ -240,8 +250,8 @@
     # Get use_kvm
     qemu_use_kvm = d.getVar("QEMU_USE_KVM")
     if qemu_use_kvm and \
-       (oe.types.boolean(qemu_use_kvm) and 'x86' in machine or \
-        d.getVar('MACHINE') in qemu_use_kvm.split()):
+       (d.getVar('MACHINE') in qemu_use_kvm.split() or \
+        oe.types.boolean(qemu_use_kvm) and 'x86' in machine):
         kvm = True
     else:
         kvm = False
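
The operand swap matters because QEMU_USE_KVM is overloaded: it may hold a
boolean or a list of machine names, and oe.types.boolean() raises ValueError on
anything that is not a recognised boolean string. With the machine-list test
first, 'or' short-circuits for listed machines before the value is ever parsed
as a boolean. A sketch with a stand-in for oe.types.boolean:

    def boolean(value):          # stand-in for oe.types.boolean
        truth = {'true': True, 'yes': True, '1': True,
                 'false': False, 'no': False, '0': False}
        if value.lower() not in truth:
            raise ValueError('invalid boolean: %s' % value)
        return truth[value.lower()]

    def kvm_enabled(qemu_use_kvm, machine):
        return machine in qemu_use_kvm.split() or \
               (boolean(qemu_use_kvm) and 'x86' in machine)

    assert kvm_enabled("qemux86 qemux86-64", "qemux86")   # list form: no ValueError
    assert kvm_enabled("True", "qemux86-64")              # boolean form still works
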
@@ -363,6 +373,7 @@
     return None
 
 def create_rpm_index(d):
+    import glob
     # Index RPMs
     rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo_c")
     index_cmds = []
@@ -379,9 +390,13 @@
         lf = bb.utils.lockfile(lockfilename, False)
         oe.path.copyhardlinktree(rpm_dir, idx_path)
         # Full indexes overload a 256MB image so reduce the number of rpms
-        # in the feed. Filter to r* since we use the run-postinst packages and
-        # this leaves some allarch and machine arch packages too.
-        bb.utils.remove(idx_path + "*/[a-qs-z]*.rpm")
+        # in the feed by filtering to specific packages needed by the tests.
+        package_list = glob.glob(idx_path + "*/*.rpm")
+
+        for pkg in package_list:
+            if not os.path.basename(pkg).startswith(("rpm", "run-postinsts", "busybox", "bash", "update-alternatives", "libc6", "curl", "musl")):
+                bb.utils.remove(pkg)
+
         bb.utils.unlockfile(lf)
         cmd = '%s --update -q %s' % (rpm_createrepo, idx_path)
 
@@ -404,4 +419,9 @@
 
 testimage_main[vardepsexclude] += "BB_ORIGENV DATETIME"
 
+python () {
+    if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
+        bb.build.addtask("testimage", "do_build", "do_image_complete", d)
+}
+
 inherit testsdk
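
This anonymous python block is what replaces the deleted testimage-auto.bbclass:
at parse time it wires do_testimage into the task graph, via
bb.build.addtask(task, before, after, d), only when TESTIMAGE_AUTO is set. The
old TEST_IMAGE = "1" workflow therefore becomes just:

    IMAGE_CLASSES += "testimage"
    TESTIMAGE_AUTO = "1"
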
diff --git a/poky/meta/classes/testsdk.bbclass b/poky/meta/classes/testsdk.bbclass
index 6981d40..458c3f4 100644
--- a/poky/meta/classes/testsdk.bbclass
+++ b/poky/meta/classes/testsdk.bbclass
@@ -23,7 +23,7 @@
                     'IMAGE_BASENAME': d.getVar("IMAGE_BASENAME"),
                     'IMAGE_PKGTYPE': d.getVar("IMAGE_PKGTYPE"),
                     'STARTTIME': d.getVar("DATETIME"),
-                    'HOST_DISTRO': ('-'.join(platform.linux_distribution())).replace(' ', '-'),
+                    'HOST_DISTRO': oe.lsb.distro_identifier().replace(' ', '-'),
                     'LAYERS': get_layers(d.getVar("BBLAYERS"))}
     return configuration
 get_sdk_configuration[vardepsexclude] = "DATETIME"
@@ -66,6 +66,14 @@
     host_pkg_manifest = OESDKTestContextExecutor._load_manifest(
         d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"))
 
+    processes = d.getVar("TESTIMAGE_NUMBER_THREADS") or d.getVar("BB_NUMBER_THREADS")
+    if processes:
+        try:
+            import testtools, subunit
+        except ImportError:
+            bb.warn("Failed to import testtools or subunit, the testcases will run serially")
+            processes = None
+
     sdk_dir = d.expand("${WORKDIR}/testimage-sdk/")
     bb.utils.remove(sdk_dir, True)
     bb.utils.mkdirhier(sdk_dir)
@@ -89,7 +97,10 @@
             import traceback
             bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
 
-        result = tc.runTests()
+        if processes:
+            result = tc.runTests(processes=int(processes))
+        else:
+            result = tc.runTests()
 
         component = "%s %s" % (pn, OESDKTestContextExecutor.name)
         context_msg = "%s:%s" % (os.path.basename(tcname), os.path.basename(sdk_env))
@@ -179,6 +190,7 @@
             f.write('SSTATE_MIRRORS += " \\n file://.* file://%s/PATH"\n' % test_data.get('SSTATE_DIR'))
             f.write('SOURCE_MIRROR_URL = "file://%s"\n' % test_data.get('DL_DIR'))
             f.write('INHERIT += "own-mirrors"\n')
+            f.write('PREMIRRORS_prepend = " git://git.yoctoproject.org/.* git://%s/git2/git.yoctoproject.org.BASENAME \\n "\n' % test_data.get('DL_DIR'))
 
         # We need to do this in case we have a minimal SDK
         subprocess.check_output(". %s > /dev/null; devtool sdk-install meta-extsdk-toolchain" % \
@@ -218,3 +230,8 @@
 addtask testsdkext
 do_testsdkext[nostamp] = "1"
 
+python () {
+    if oe.types.boolean(d.getVar("TESTIMAGE_AUTO") or "False"):
+        bb.build.addtask("testsdk", None, "do_populate_sdk", d)
+        bb.build.addtask("testsdkext", None, "do_populate_sdk_ext", d)
+}
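
The SDK tests can now fan out across processes, but only when the optional
testtools and subunit modules import cleanly; otherwise the thread-count knob is
dropped with a warning and execution stays serial. The guard pattern as a
standalone sketch (function name hypothetical):

    def resolve_processes(requested):
        if not requested:
            return None                       # knob unset: run serially
        try:
            import testtools, subunit         # needed by the parallel runner
        except ImportError:
            print("warning: testtools/subunit missing, running serially")
            return None
        return int(requested)
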
diff --git a/poky/meta/classes/toolchain-scripts.bbclass b/poky/meta/classes/toolchain-scripts.bbclass
index 71da5e5..6d1ba69 100644
--- a/poky/meta/classes/toolchain-scripts.bbclass
+++ b/poky/meta/classes/toolchain-scripts.bbclass
@@ -51,6 +51,10 @@
 	echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
 	echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
 	echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
+	echo 'export OECORE_BASELIB="${baselib}"' >> $script
+	echo 'export OECORE_TARGET_ARCH="${TARGET_ARCH}"' >>$script
+	echo 'export OECORE_TARGET_OS="${TARGET_OS}"' >>$script
+
 	echo 'unset command_not_found_handle' >> $script
 
 	toolchain_shared_env_script
@@ -118,14 +122,34 @@
 }
 
 toolchain_create_post_relocate_script() {
-	script=$1
-	rm -f $script
-	touch $script
+	relocate_script=$1
+	env_dir=$2
+	rm -f $relocate_script
+	touch $relocate_script
 
-    cat >> $script <<EOF
+	cat >> $relocate_script <<EOF
+# Source top-level SDK env scripts in case they are needed for the relocate
+# scripts.
+for env_setup_script in ${env_dir}/environment-setup-*; do
+    . \$env_setup_script
+    status=\$?
+    if [ \$status != 0 ]; then
+        echo "\$0: Failed to source \$env_setup_script with status \$status"
+        exit \$status
+    fi
+done
+
 if [ -d "${SDKPATHNATIVE}/post-relocate-setup.d/" ]; then
-    for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*.sh; do
+    for s in ${SDKPATHNATIVE}/post-relocate-setup.d/*; do
+        if [ ! -x \$s ]; then
+            continue
+        fi
         \$s "\$1"
+        status=\$?
+        if [ \$status != 0 ]; then
+            echo "post-relocate command \"\$s \$1\" failed with status \$status" >&2
+            exit \$status
+        fi
     done
     rm -rf "${SDKPATHNATIVE}/post-relocate-setup.d"
 fi
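
The rewritten loop runs every executable entry in post-relocate-setup.d (not
just *.sh), aborts on the first non-zero exit status, and sources the top-level
SDK environment files first so hooks see the OECORE_* variables exported above.
A Python rendering of the hook-runner half, under those assumptions (directory
semantics reused, everything else illustrative):

    import os, shutil, subprocess, sys

    def run_post_relocate(hook_dir, target_sysroot):
        if not os.path.isdir(hook_dir):
            return
        for name in sorted(os.listdir(hook_dir)):
            hook = os.path.join(hook_dir, name)
            if not os.access(hook, os.X_OK):
                continue                      # skip non-executable entries
            ret = subprocess.call([hook, target_sysroot])
            if ret != 0:
                sys.exit('post-relocate command "%s %s" failed with status %d'
                         % (hook, target_sysroot, ret))
        shutil.rmtree(hook_dir)               # hooks run exactly once
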
diff --git a/poky/meta/classes/uboot-extlinux-config.bbclass b/poky/meta/classes/uboot-extlinux-config.bbclass
index 61dff14..c65c421 100644
--- a/poky/meta/classes/uboot-extlinux-config.bbclass
+++ b/poky/meta/classes/uboot-extlinux-config.bbclass
@@ -58,7 +58,7 @@
 #
 # The kernel has an internal default console, which you can override with
 # a console=...some_tty...
-UBOOT_EXTLINUX_CONSOLE ??= "console=${console}"
+UBOOT_EXTLINUX_CONSOLE ??= "console=${console},${baudrate}"
 UBOOT_EXTLINUX_LABELS ??= "linux"
 UBOOT_EXTLINUX_FDT ??= ""
 UBOOT_EXTLINUX_FDTDIR ??= "../"
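
With a machine configuration providing, say, console = "ttyS0" and
baudrate = "115200" (values illustrative), the default now expands to:

    console=ttyS0,115200

so the serial console comes up at the configured rate instead of whatever the
kernel would otherwise assume.
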
diff --git a/poky/meta/classes/uninative.bbclass b/poky/meta/classes/uninative.bbclass
index de2221a..ba99fb6 100644
--- a/poky/meta/classes/uninative.bbclass
+++ b/poky/meta/classes/uninative.bbclass
@@ -1,10 +1,11 @@
-UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', 'ld-linux.so.2', d)}"
+UNINATIVE_LOADER ?= "${UNINATIVE_STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'i686', 'ld-linux.so.2', '', d)}${@bb.utils.contains('BUILD_ARCH', 'aarch64', 'ld-linux-aarch64.so.1', '', d)}"
 UNINATIVE_STAGING_DIR ?= "${STAGING_DIR}"
 
 UNINATIVE_URL ?= "unset"
 UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2"
 # Example checksums
-#UNINATIVE_CHECKSUM[i586] = "dead"
+#UNINATIVE_CHECKSUM[aarch64] = "dead"
+#UNINATIVE_CHECKSUM[i686] = "dead"
 #UNINATIVE_CHECKSUM[x86_64] = "dead"
 UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
 
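The chained bb.utils.contains() calls implement a BUILD_ARCH to dynamic-loader
lookup (adding aarch64 hosts); the old expression could only distinguish x86_64
from everything-else. The same mapping as plain Python, for readability:

    LOADERS = {
        'x86_64':  'ld-linux-x86-64.so.2',
        'i686':    'ld-linux.so.2',
        'aarch64': 'ld-linux-aarch64.so.1',
    }

    def uninative_loader(staging_dir, build_arch):
        # e.g. "<staging>-uninative/x86_64-linux/lib/ld-linux-x86-64.so.2"
        return "%s-uninative/%s-linux/lib/%s" % (
            staging_dir, build_arch, LOADERS.get(build_arch, ''))
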
diff --git a/poky/meta/classes/update-rc.d.bbclass b/poky/meta/classes/update-rc.d.bbclass
index 06e3b21..265c4be 100644
--- a/poky/meta/classes/update-rc.d.bbclass
+++ b/poky/meta/classes/update-rc.d.bbclass
@@ -39,9 +39,9 @@
 updatercd_postinst() {
 if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
 	if [ -n "$D" ]; then
-		OPT="-r $D"
+		OPT="-f -r $D"
 	else
-		OPT="-s"
+		OPT="-f -s"
 	fi
 	update-rc.d $OPT ${INITSCRIPT_NAME} ${INITSCRIPT_PARAMS}
 fi
diff --git a/poky/meta/classes/useradd_base.bbclass b/poky/meta/classes/useradd_base.bbclass
index 551c82c..0d0bdb8 100644
--- a/poky/meta/classes/useradd_base.bbclass
+++ b/poky/meta/classes/useradd_base.bbclass
@@ -51,10 +51,10 @@
 	local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
 	local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
 	bbnote "${PN}: Running groupmems command with group $groupname and user $username"
-	local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+	local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
 	if test "x$mem_exists" = "x"; then
 		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
-		mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+		mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*$" $rootdir/etc/group || true`"
 		if test "x$mem_exists" = "x"; then
 			bbfatal "${PN}: groupmems command did not succeed."
 		fi
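
The only change in this hunk is the trailing '$' on the membership grep: without
the end anchor, a username matches as a prefix of another member (e.g. "bob"
inside "bobby") and the real groupmems call is skipped. The failure mode,
translated to Python re (group line illustrative):

    import re

    line = "render:x:109:alice,bobby"

    def exists(user, anchored=True):
        pat = r"^render:[^:]*:[^:]*:([^,]*,)*%s(,[^,]*)*" % re.escape(user)
        if anchored:
            pat += "$"
        return re.search(pat, line) is not None

    assert exists("bob", anchored=False)       # old pattern: false positive
    assert not exists("bob", anchored=True)    # fixed pattern
    assert exists("bobby", anchored=True)      # real member still found
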
diff --git a/poky/meta/classes/utils.bbclass b/poky/meta/classes/utils.bbclass
index 3f4f51b..0016e5c 100644
--- a/poky/meta/classes/utils.bbclass
+++ b/poky/meta/classes/utils.bbclass
@@ -328,15 +328,8 @@
 def all_multilib_tune_values(d, var, unique = True, need_split = True, delim = ' '):
     """Return a string of all ${var} in all multilib tune configuration"""
     values = []
-    value = d.getVar(var) or ""
-    if value != "":
-        if need_split:
-            for item in value.split(delim):
-                values.append(item)
-        else:
-            values.append(value)
-    variants = d.getVar("MULTILIB_VARIANTS") or ""
-    for item in variants.split():
+    variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
+    for item in variants:
         localdata = get_multilib_datastore(item, d)
         # We need WORKDIR to be consistent with the original datastore
         localdata.setVar("WORKDIR", d.getVar("WORKDIR"))
@@ -367,23 +360,10 @@
     values = {}
     for v in vars:
         values[v] = []
-
-    localdata = bb.data.createCopy(d)
-    overrides = localdata.getVar("OVERRIDES", False).split(":")
-    newoverrides = []
-    for o in overrides:
-        if not o.startswith("virtclass-multilib-"):
-            newoverrides.append(o)
-    localdata.setVar("OVERRIDES", ":".join(newoverrides))
-    localdata.setVar("MLPREFIX", "")
-    origdefault = localdata.getVar("DEFAULTTUNE_MULTILIB_ORIGINAL")
-    if origdefault:
-        localdata.setVar("DEFAULTTUNE", origdefault)
     values['ml'] = ['']
-    for v in vars:
-        values[v].append(localdata.getVar(v))
-    variants = d.getVar("MULTILIB_VARIANTS") or ""
-    for item in variants.split():
+
+    variants = (d.getVar("MULTILIB_VARIANTS") or "").split() + ['']
+    for item in variants:
         localdata = get_multilib_datastore(item, d)
         values[v].append(localdata.getVar(v))
         values['ml'].append(item)
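
Both helpers now treat the default (non-multilib) configuration as just another
variant by appending '' to MULTILIB_VARIANTS, with get_multilib_datastore('')
assumed to hand back an unmodified datastore, instead of special-casing it with
hand-rolled OVERRIDES surgery. The shape of the resulting loop, with the
datastore stubbed as a dict:

    def all_tune_values(var, variants, get_datastore):
        values = []
        for item in variants + ['']:     # '' stands for the default tune
            d = get_datastore(item)
            values.append(d[var])
        return values

    stores = {'': {'TUNE_ARCH': 'x86_64'}, 'lib32': {'TUNE_ARCH': 'i686'}}
    print(all_tune_values('TUNE_ARCH', ['lib32'], stores.get))
    # ['i686', 'x86_64']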