Yocto 2.5

Move OpenBMC to Yocto 2.5 (sumo)

Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Change-Id: I5c5ad6904a16e14c1c397f0baf10c9d465594a78
diff --git a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
index 3e86a46..b0365ab 100644
--- a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
+++ b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
@@ -36,10 +36,29 @@
 related_fields['RDEPENDS'] = ['DEPENDS']
 related_fields['RRECOMMENDS'] = ['DEPENDS']
 related_fields['FILELIST'] = ['FILES']
-related_fields['PKGSIZE'] = ['FILELIST']
 related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
 related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
 
+colours = {
+    'colour_default': '',
+    'colour_add':     '',
+    'colour_remove':  '',
+}
+
+def init_colours(use_colours):
+    global colours
+    if use_colours:
+        colours = {
+            'colour_default': '\033[0m',
+            'colour_add':     '\033[1;32m',
+            'colour_remove':  '\033[1;31m',
+        }
+    else:
+        colours = {
+            'colour_default': '',
+            'colour_add':     '',
+            'colour_remove':  '',
+        }
 
 class ChangeRecord:
     def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
@@ -79,7 +98,17 @@
                                 for name in adirs - bdirs]
             files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
                                 for name in bdirs - adirs]
-            renamed_dirs = [(dir1, dir2) for dir1, files1 in files_ab for dir2, files2 in files_ba if files1 == files2]
+            renamed_dirs = []
+            for dir1, files1 in files_ab:
+                rename = False
+                for dir2, files2 in files_ba:
+                    if files1 == files2 and not rename:
+                        renamed_dirs.append((dir1,dir2))
+                        # Make sure that we don't use this (dir, files) pair again.
+                        files_ba.remove((dir2,files2))
+                        # If a dir has already been found to have a rename, stop and go no further.
+                        rename = True
+
             # remove files that belong to renamed dirs from aitems and bitems
             for dir1, dir2 in renamed_dirs:
                 aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
@@ -88,6 +117,7 @@
 
         if self.fieldname in list_fields or self.fieldname in list_order_fields:
             renamed_dirs = []
+            changed_order = False
             if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                 (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                 aitems = pkglist_combine(depvera)
@@ -101,22 +131,33 @@
             removed = list(set(aitems) - set(bitems))
             added = list(set(bitems) - set(aitems))
 
+            if not removed and not added:
+                depvera = bb.utils.explode_dep_versions2(self.oldvalue, sort=False)
+                depverb = bb.utils.explode_dep_versions2(self.newvalue, sort=False)
+                for i, j in zip(depvera.items(), depverb.items()):
+                    if i[0] != j[0]:
+                        changed_order = True
+                        break
+
             lines = []
             if renamed_dirs:
                 for dfrom, dto in renamed_dirs:
-                    lines.append('directory renamed %s -> %s' % (dfrom, dto))
+                    lines.append('directory renamed {colour_remove}{}{colour_default} -> {colour_add}{}{colour_default}'.format(dfrom, dto, **colours))
             if removed or added:
                 if removed and not bitems:
-                    lines.append('removed all items "%s"' % ' '.join(removed))
+                    lines.append('removed all items "{colour_remove}{}{colour_default}"'.format(' '.join(removed), **colours))
                 else:
                     if removed:
-                        lines.append('removed "%s"' % ' '.join(removed))
+                        lines.append('removed "{colour_remove}{value}{colour_default}"'.format(value=' '.join(removed), **colours))
                     if added:
-                        lines.append('added "%s"' % ' '.join(added))
+                        lines.append('added "{colour_add}{value}{colour_default}"'.format(value=' '.join(added), **colours))
             else:
                 lines.append('changed order')
 
-            out = '%s: %s' % (self.fieldname, ', '.join(lines))
+            if not (removed or added or changed_order):
+                out = ''
+            else:
+                out = '%s: %s' % (self.fieldname, ', '.join(lines))
 
         elif self.fieldname in numeric_fields:
             aval = int(self.oldvalue or 0)
@@ -125,9 +166,9 @@
                 percentchg = ((bval - aval) / float(aval)) * 100
             else:
                 percentchg = 100
-            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default} ({}{:.0f}%)'.format(self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg, **colours)
         elif self.fieldname in defaultval_map:
-            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+            out = '{} changed from {colour_remove}{}{colour_default} to {colour_add}{}{colour_default}'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
             if self.fieldname == 'PKG' and '[default]' in self.newvalue:
                 out += ' - may indicate debian renaming failure'
         elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
@@ -163,7 +204,7 @@
             else:
                 out = ''
         else:
-            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
+            out = '{} changed from "{colour_remove}{}{colour_default}" to "{colour_add}{}{colour_default}"'.format(self.fieldname, self.oldvalue, self.newvalue, **colours)
 
         if self.related:
             for chg in self.related:
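
The colour support added above keeps a single module-level colours mapping and feeds it into the new str.format() calls as keyword arguments, so each message template works unchanged whether colours are enabled or not (the escapes simply expand to empty strings). A minimal, self-contained sketch of the same pattern:

    # Sketch of the colour-keyed formatting pattern used in the hunk above.
    colours = {'colour_default': '', 'colour_add': '', 'colour_remove': ''}

    def init_colours(use_colours):
        # Populate ANSI escapes only when colour output is requested;
        # otherwise the placeholders expand to empty strings.
        global colours
        colours['colour_default'] = '\033[0m' if use_colours else ''
        colours['colour_add'] = '\033[1;32m' if use_colours else ''
        colours['colour_remove'] = '\033[1;31m' if use_colours else ''

    init_colours(True)
    # Positional fields carry values; named fields come from **colours.
    # 'libfoo' is a made-up package name for illustration.
    print('added "{colour_add}{}{colour_default}"'.format('libfoo', **colours))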
diff --git a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
index ac2fae1..4b94806 100644
--- a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
+++ b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
@@ -95,7 +95,7 @@
                     destname = os.path.join(layerdestpath, f_basename)
                     _smart_copy(f, destname)
             else:
-                if os.path.exists(layerdestpath):
+                if os.path.exists(os.path.join(layerdestpath, 'conf/layer.conf')):
                     bb.note("Skipping layer %s, already handled" % layer)
                 else:
                     _smart_copy(layer, layerdestpath)
diff --git a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
index 9cc88f0..b172729 100644
--- a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
+++ b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
@@ -12,6 +12,7 @@
         self.gpg_path = d.getVar('GPG_PATH')
         self.gpg_version = self.get_gpg_version()
         self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
+        self.gpg_agent_bin = bb.utils.which(os.getenv('PATH'), "gpg-agent")
 
     def export_pubkey(self, output_file, keyid, armor=True):
         """Export GPG public key to a file"""
@@ -31,7 +32,7 @@
         """Sign RPM files"""
 
         cmd = self.rpm_bin + " --addsign --define '_gpg_name %s'  " % keyid
-        gpg_args = '--no-permission-warning --batch --passphrase=%s' % passphrase
+        gpg_args = '--no-permission-warning --batch --passphrase=%s --agent-program=%s|--auto-expand-secmem' % (passphrase, self.gpg_agent_bin)
         if self.gpg_version > (2,1,):
             gpg_args += ' --pinentry-mode=loopback'
         cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
@@ -71,6 +72,9 @@
         if self.gpg_version > (2,1,):
             cmd += ['--pinentry-mode', 'loopback']
 
+        if self.gpg_agent_bin:
+            cmd += ["--agent-program=%s|--auto-expand-secmem" % (self.gpg_agent_bin)]
+
         cmd += [input_file]
 
         try:
@@ -99,7 +103,7 @@
         import subprocess
         try:
             ver_str = subprocess.check_output((self.gpg_bin, "--version", "--no-permission-warning")).split()[2].decode("utf-8")
-            return tuple([int(i) for i in ver_str.split('.')])
+            return tuple([int(i) for i in ver_str.split("-")[0].split('.')])
         except subprocess.CalledProcessError as e:
             raise bb.build.FuncFailed("Could not get gpg version: %s" % e)
 
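
The get_gpg_version() fix above handles version strings that carry a dash suffix; previously int() would raise on a component such as '11-unknown' (a hypothetical example). Splitting on '-' first and keeping only the leading dotted version makes the tuple conversion safe:

    def parse_gpg_version(ver_str):
        # Drop any '-suffix' before splitting the dotted version into ints,
        # mirroring the split("-")[0] change in the hunk above.
        return tuple(int(i) for i in ver_str.split('-')[0].split('.'))

    assert parse_gpg_version('2.1.11-unknown') == (2, 1, 11)
    assert parse_gpg_version('1.4.7') == (1, 4, 7)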
diff --git a/import-layers/yocto-poky/meta/lib/oe/manifest.py b/import-layers/yocto-poky/meta/lib/oe/manifest.py
index 60c49be..674303c 100644
--- a/import-layers/yocto-poky/meta/lib/oe/manifest.py
+++ b/import-layers/yocto-poky/meta/lib/oe/manifest.py
@@ -274,8 +274,8 @@
                     if pkg_list is not None:
                         pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
 
-            for pkg_type in pkgs:
-                for pkg in pkgs[pkg_type].split():
+            for pkg_type in sorted(pkgs):
+                for pkg in sorted(pkgs[pkg_type].split()):
                     manifest.write("%s,%s\n" % (pkg_type, pkg))
 
     def create_final(self):
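
Sorting the package types and package names above makes the manifest contents deterministic; iterating the dict and an unsorted split() reflects whatever order the variables were parsed in, so equivalent builds could emit differently ordered manifests. An illustration with hypothetical values:

    pkgs = {'mip': 'b a', 'pkg': 'z x y'}
    for pkg_type in sorted(pkgs):
        for pkg in sorted(pkgs[pkg_type].split()):
            print('%s,%s' % (pkg_type, pkg))
    # Always prints mip,a mip,b pkg,x pkg,y pkg,z in that order.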
diff --git a/import-layers/yocto-poky/meta/lib/oe/package.py b/import-layers/yocto-poky/meta/lib/oe/package.py
index 1e5c3aa..4f3e21a 100644
--- a/import-layers/yocto-poky/meta/lib/oe/package.py
+++ b/import-layers/yocto-poky/meta/lib/oe/package.py
@@ -72,8 +72,7 @@
     # 16 - kernel module
     def is_elf(path):
         exec_type = 0
-        ret, result = oe.utils.getstatusoutput(
-            "file \"%s\"" % path.replace("\"", "\\\""))
+        ret, result = oe.utils.getstatusoutput("file -b '%s'" % path)
 
         if ret:
             bb.error("split_and_strip_files: 'file %s' failed" % path)
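
Passing -b to file above makes it print only the classification without the 'path:' prefix, so later substring checks against the output cannot be fooled by a pathname that happens to contain a matching string, and it sidesteps escaping embedded double quotes. A quick way to see the difference:

    import subprocess
    # file /bin/ls    -> '/bin/ls: ELF 64-bit LSB executable, ...'
    # file -b /bin/ls -> 'ELF 64-bit LSB executable, ...'
    print(subprocess.check_output(['file', '-b', '/bin/ls']).decode())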
diff --git a/import-layers/yocto-poky/meta/lib/oe/package_manager.py b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
index ed8fec8..2d8aeba 100644
--- a/import-layers/yocto-poky/meta/lib/oe/package_manager.py
+++ b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
@@ -12,6 +12,7 @@
 import oe.path
 import string
 from oe.gpg_sign import get_signer
+import hashlib
 
 # this can be used by all PM backends to create the index files in parallel
 def create_index(arg):
@@ -22,12 +23,12 @@
     if result:
         bb.note(result)
 
-"""
-This method parse the output from the package managerand return
-a dictionary with the information of the packages. This is used
-when the packages are in deb or ipk format.
-"""
 def opkg_query(cmd_output):
+    """
+    This method parses the output from the package manager and returns
+    a dictionary with the information of the packages. This is used
+    when the packages are in deb or ipk format.
+    """
     verregex = re.compile(' \([=<>]* [^ )]*\)')
     output = dict()
     pkg = ""
@@ -83,6 +84,11 @@
 
     return output
 
+# Note: this should be bb.fatal in the future.
+def failed_postinsts_warn(pkgs, log_path):
+    bb.warn("""Intentionally failing postinstall scriptlets of %s to defer them to first boot is deprecated. Please place them into pkg_postinst_ontarget_${PN} ().
+If deferring to first boot wasn't the intent, then scriptlet failure may mean an issue in the recipe, or a regression elsewhere.
+Details of the failure are in %s.""" %(pkgs, log_path))
 
 class Indexer(object, metaclass=ABCMeta):
     def __init__(self, d, deploy_dir):
@@ -96,13 +102,16 @@
 
 class RpmIndexer(Indexer):
     def write_index(self):
+        self.do_write_index(self.deploy_dir)
+
+    def do_write_index(self, deploy_dir):
         if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
             signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
         else:
             signer = None
 
         createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
-        result = create_index("%s --update -q %s" % (createrepo_c, self.deploy_dir))
+        result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir))
         if result:
             bb.fatal(result)
 
@@ -110,17 +119,28 @@
         if signer:
             sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
             is_ascii_sig = (sig_type.upper() != "BIN")
-            signer.detach_sign(os.path.join(self.deploy_dir, 'repodata', 'repomd.xml'),
+            signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'),
                                self.d.getVar('PACKAGE_FEED_GPG_NAME'),
                                self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                armor=is_ascii_sig)
 
+class RpmSubdirIndexer(RpmIndexer):
+    def write_index(self):
+        bb.note("Generating package index for %s" %(self.deploy_dir))
+        self.do_write_index(self.deploy_dir)
+        for entry in os.walk(self.deploy_dir):
+            if os.path.samefile(self.deploy_dir, entry[0]):
+                for dir in entry[1]:
+                    if dir != 'repodata':
+                        dir_path = oe.path.join(self.deploy_dir, dir)
+                        bb.note("Generating package index for %s" %(dir_path))
+                        self.do_write_index(dir_path)
 
 class OpkgIndexer(Indexer):
     def write_index(self):
         arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
                      "SDK_PACKAGE_ARCHS",
-                     "MULTILIB_ARCHS"]
+                     ]
 
         opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
         if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
@@ -307,39 +327,103 @@
     This is an abstract class. Do not instantiate this directly.
     """
 
-    def __init__(self, d):
+    def __init__(self, d, target_rootfs):
         self.d = d
+        self.target_rootfs = target_rootfs
         self.deploy_dir = None
         self.deploy_lock = None
+        self._initialize_intercepts()
 
-    """
-    Update the package manager package database.
-    """
+    def _initialize_intercepts(self):
+        bb.note("Initializing intercept dir for %s" % self.target_rootfs)
+        postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
+        if not postinst_intercepts_dir:
+            postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+        # As there might be more than one instance of PackageManager operating at the same time
+        # we need to isolate the intercept_scripts directories from each other,
+        # hence the ugly hash digest in dir name.
+        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
+                                      "intercept_scripts-%s" %(hashlib.sha256(self.target_rootfs.encode()).hexdigest()) )
+
+        bb.utils.remove(self.intercepts_dir, True)
+        shutil.copytree(postinst_intercepts_dir, self.intercepts_dir)
+
+    @abstractmethod
+    def _handle_intercept_failure(self, failed_script):
+        pass
+
+    def _postpone_to_first_boot(self, postinst_intercept_hook):
+        with open(postinst_intercept_hook) as intercept:
+            registered_pkgs = None
+            for line in intercept.read().split("\n"):
+                m = re.match("^##PKGS:(.*)", line)
+                if m is not None:
+                    registered_pkgs = m.group(1).strip()
+                    break
+
+            if registered_pkgs is not None:
+                bb.note("If an image is being built, the postinstalls for the following packages "
+                        "will be postponed for first boot: %s" %
+                        registered_pkgs)
+
+                # call the backend dependent handler
+                self._handle_intercept_failure(registered_pkgs)
+
+
+    def run_intercepts(self):
+        intercepts_dir = self.intercepts_dir
+
+        bb.note("Running intercept scripts:")
+        os.environ['D'] = self.target_rootfs
+        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
+        for script in os.listdir(intercepts_dir):
+            script_full = os.path.join(intercepts_dir, script)
+
+            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+                continue
+
+            if script == "delay_to_first_boot":
+                self._postpone_to_first_boot(script_full)
+                continue
+
+            bb.note("> Executing %s intercept ..." % script)
+
+            try:
+                output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
+                if output: bb.note(output.decode("utf-8"))
+            except subprocess.CalledProcessError as e:
+                bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+                bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
+                self._postpone_to_first_boot(script_full)
+
     @abstractmethod
     def update(self):
+        """
+        Update the package manager package database.
+        """
         pass
 
-    """
-    Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
-    True, installation failures are ignored.
-    """
     @abstractmethod
     def install(self, pkgs, attempt_only=False):
+        """
+        Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+        True, installation failures are ignored.
+        """
         pass
 
-    """
-    Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
-    is False, the any dependencies are left in place.
-    """
     @abstractmethod
     def remove(self, pkgs, with_dependencies=True):
+        """
+        Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+        is False, then any dependencies are left in place.
+        """
         pass
 
-    """
-    This function creates the index files
-    """
     @abstractmethod
     def write_index(self):
+        """
+        This function creates the index files
+        """
         pass
 
     @abstractmethod
@@ -350,30 +434,28 @@
     def list_installed(self):
         pass
 
-    """
-    Returns the path to a tmpdir where resides the contents of a package.
-
-    Deleting the tmpdir is responsability of the caller.
-
-    """
     @abstractmethod
     def extract(self, pkg):
+        """
+        Returns the path to a tmpdir where the contents of a package reside.
+        Deleting the tmpdir is the responsibility of the caller.
+        """
         pass
 
-    """
-    Add remote package feeds into repository manager configuration. The parameters
-    for the feeds are set by feed_uris, feed_base_paths and feed_archs.
-    See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
-    for their description.
-    """
     @abstractmethod
     def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+        """
+        Add remote package feeds into repository manager configuration. The parameters
+        for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+        See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+        for their description.
+        """
         pass
 
-    """
-    Install all packages that match a glob.
-    """
     def install_glob(self, globs, sdk=False):
+        """
+        Install all packages that match a glob.
+        """
         # TODO don't have sdk here but have a property on the superclass
         # (and respect in install_complementary)
         if sdk:
@@ -393,14 +475,14 @@
                          "'%s' returned %d:\n%s" %
                          (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
 
-    """
-    Install complementary packages based upon the list of currently installed
-    packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
-    these packages, if they don't exist then no error will occur.  Note: every
-    backend needs to call this function explicitly after the normal package
-    installation
-    """
     def install_complementary(self, globs=None):
+        """
+        Install complementary packages based upon the list of currently installed
+        packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
+        these packages, if they don't exist then no error will occur.  Note: every
+        backend needs to call this function explicitly after the normal package
+        installation
+        """
         if globs is None:
             globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
             split_linguas = set()
@@ -457,13 +539,13 @@
 
         self.deploy_lock = None
 
-    """
-    Construct URIs based on the following pattern: uri/base_path where 'uri'
-    and 'base_path' correspond to each element of the corresponding array
-    argument leading to len(uris) x len(base_paths) elements on the returned
-    array
-    """
     def construct_uris(self, uris, base_paths):
+        """
+        Construct URIs based on the following pattern: uri/base_path where 'uri'
+        and 'base_path' correspond to each element of the corresponding array
+        argument leading to len(uris) x len(base_paths) elements on the returned
+        array
+        """
         def _append(arr1, arr2, sep='/'):
             res = []
             narr1 = [a.rstrip(sep) for a in arr1]
@@ -477,18 +559,98 @@
             return res
         return _append(uris, base_paths)
 
+def create_packages_dir(d, rpm_repo_dir, deploydir, taskname, filterbydependencies):
+    """
+    Go through our do_package_write_X dependencies and hardlink the packages we depend
+    upon into the repo directory. This prevents us from seeing other packages that
+    may have been built but that we don't depend upon, as well as packages for
+    architectures we don't support.
+    """
+    import errno
+
+    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+    mytaskname = d.getVar("BB_RUNTASK")
+    pn = d.getVar("PN")
+    seendirs = set()
+    multilibs = {}
+
+    rpm_subrepo_dir = oe.path.join(rpm_repo_dir, "rpm")
+
+    bb.utils.remove(rpm_subrepo_dir, recurse=True)
+    bb.utils.mkdirhier(rpm_subrepo_dir)
+
+    # Detect bitbake -b usage
+    nodeps = d.getVar("BB_LIMITEDDEPS") or False
+    if nodeps or not filterbydependencies:
+        oe.path.symlink(deploydir, rpm_subrepo_dir, True)
+        return
+
+    start = None
+    for dep in taskdepdata:
+        data = taskdepdata[dep]
+        if data[1] == mytaskname and data[0] == pn:
+            start = dep
+            break
+    if start is None:
+        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
+    rpmdeps = set()
+    start = [start]
+    seen = set(start)
+    # Support direct dependencies (do_rootfs -> rpms)
+    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> rpms)
+    while start:
+        next = []
+        for dep2 in start:
+            for dep in taskdepdata[dep2][3]:
+                if taskdepdata[dep][0] != pn:
+                    if "do_" + taskname in dep:
+                        rpmdeps.add(dep)
+                elif dep not in seen:
+                    next.append(dep)
+                    seen.add(dep)
+        start = next
+
+    for dep in rpmdeps:
+        c = taskdepdata[dep][0]
+        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
+        if not manifest:
+            bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
+        if not os.path.exists(manifest):
+            continue
+        with open(manifest, "r") as f:
+            for l in f:
+                l = l.strip()
+                dest = l.replace(deploydir, "")
+                dest = rpm_subrepo_dir + dest
+                if l.endswith("/"):
+                    if dest not in seendirs:
+                        bb.utils.mkdirhier(dest)
+                        seendirs.add(dest)
+                    continue
+                # Try to hardlink the file, copy if that fails
+                destdir = os.path.dirname(dest)
+                if destdir not in seendirs:
+                    bb.utils.mkdirhier(destdir)
+                    seendirs.add(destdir)
+                try:
+                    os.link(l, dest)
+                except OSError as err:
+                    if err.errno == errno.EXDEV:
+                        bb.utils.copyfile(l, dest)
+                    else:
+                        raise
+
 class RpmPM(PackageManager):
     def __init__(self,
                  d,
                  target_rootfs,
                  target_vendor,
                  task_name='target',
-                 providename=None,
                  arch_var=None,
                  os_var=None,
-                 rpm_repo_workdir="oe-rootfs-repo"):
-        super(RpmPM, self).__init__(d)
-        self.target_rootfs = target_rootfs
+                 rpm_repo_workdir="oe-rootfs-repo",
+                 filterbydependencies=True):
+        super(RpmPM, self).__init__(d, target_rootfs)
         self.target_vendor = target_vendor
         self.task_name = task_name
         if arch_var == None:
@@ -501,8 +663,7 @@
             self.primary_arch = self.d.getVar('MACHINE_ARCH')
 
         self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
-        bb.utils.mkdirhier(self.rpm_repo_dir)
-        oe.path.symlink(self.d.getVar('DEPLOY_DIR_RPM'), oe.path.join(self.rpm_repo_dir, "rpm"), True)
+        create_packages_dir(self.d, self.rpm_repo_dir, d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
 
         self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
         if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
@@ -577,7 +738,7 @@
             gpg_opts += 'repo_gpgcheck=1\n'
             gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME'))
 
-        if self.d.getVar('RPM_SIGN_PACKAGES') == '0':
+        if self.d.getVar('RPM_SIGN_PACKAGES') != '1':
             gpg_opts += 'gpgcheck=0\n'
 
         bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
@@ -602,8 +763,7 @@
         os.environ['OFFLINE_ROOT'] = self.target_rootfs
         os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
         os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
-        os.environ['INTERCEPT_DIR'] = oe.path.join(self.d.getVar('WORKDIR'),
-                                                   "intercept_scripts")
+        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
         os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
 
 
@@ -628,6 +788,8 @@
             if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
                 failed_scriptlets_pkgnames[line.split()[-1]] = True
 
+        if len(failed_scriptlets_pkgnames) > 0:
+            failed_postinsts_warn(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
         for pkg in failed_scriptlets_pkgnames.keys():
             self.save_rpmpostinst(pkg)
 
@@ -730,6 +892,7 @@
                              "--setopt=logdir=%s" % (self.d.getVar('T'))
                             ]
         cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+        bb.note('Running %s' % ' '.join(cmd))
         try:
             output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8")
             if print_output:
@@ -782,6 +945,14 @@
         open(saved_script_name, 'w').write(output)
         os.chmod(saved_script_name, 0o755)
 
+    def _handle_intercept_failure(self, registered_pkgs):
+        rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+        bb.utils.mkdirhier(rpm_postinsts_dir)
+
+        # Save the package postinstalls in /etc/rpm-postinsts
+        for pkg in registered_pkgs.split():
+            self.save_rpmpostinst(pkg)
+
     def extract(self, pkg):
         output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
         pkg_name = output.splitlines()[-1]
@@ -819,18 +990,18 @@
 
 
 class OpkgDpkgPM(PackageManager):
-    """
-    This is an abstract class. Do not instantiate this directly.
-    """
-    def __init__(self, d):
-        super(OpkgDpkgPM, self).__init__(d)
+    def __init__(self, d, target_rootfs):
+        """
+        This is an abstract class. Do not instantiate this directly.
+        """
+        super(OpkgDpkgPM, self).__init__(d, target_rootfs)
 
-    """
-    Returns a dictionary with the package info.
-
-    This method extracts the common parts for Opkg and Dpkg
-    """
     def package_info(self, pkg, cmd):
+        """
+        Returns a dictionary with the package info.
+
+        This method extracts the common parts for Opkg and Dpkg
+        """
 
         try:
             output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
@@ -839,14 +1010,14 @@
                      "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
         return opkg_query(output)
 
-    """
-    Returns the path to a tmpdir where resides the contents of a package.
-
-    Deleting the tmpdir is responsability of the caller.
-
-    This method extracts the common parts for Opkg and Dpkg
-    """
     def extract(self, pkg, pkg_info):
+        """
+        Returns the path to a tmpdir where the contents of a package reside.
+
+        Deleting the tmpdir is the responsibility of the caller.
+
+        This method extracts the common parts for Opkg and Dpkg
+        """
 
         ar_cmd = bb.utils.which(os.getenv("PATH"), "ar")
         tar_cmd = bb.utils.which(os.getenv("PATH"), "tar")
@@ -885,12 +1056,13 @@
 
         return tmp_dir
 
+    def _handle_intercept_failure(self, registered_pkgs):
+        self.mark_packages("unpacked", registered_pkgs.split())
 
 class OpkgPM(OpkgDpkgPM):
     def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
-        super(OpkgPM, self).__init__(d)
+        super(OpkgPM, self).__init__(d, target_rootfs)
 
-        self.target_rootfs = target_rootfs
         self.config_file = config_file
         self.pkg_archs = archs
         self.task_name = task_name
@@ -921,12 +1093,12 @@
 
         self.indexer = OpkgIndexer(self.d, self.deploy_dir)
 
-    """
-    This function will change a package's status in /var/lib/opkg/status file.
-    If 'packages' is None then the new_status will be applied to all
-    packages
-    """
     def mark_packages(self, status_tag, packages=None):
+        """
+        This function will change a package's status in /var/lib/opkg/status file.
+        If 'packages' is None then the new_status will be applied to all
+        packages
+        """
         status_file = os.path.join(self.opkg_dir, "status")
 
         with open(status_file, "r") as sf:
@@ -1079,8 +1251,7 @@
         os.environ['OFFLINE_ROOT'] = self.target_rootfs
         os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
         os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
-        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
-                                                   "intercept_scripts")
+        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
         os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
 
         try:
@@ -1088,6 +1259,13 @@
             bb.note(cmd)
             output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
             bb.note(output)
+            failed_pkgs = []
+            for line in output.split('\n'):
+                if line.endswith("configuration required on target."):
+                    bb.warn(line)
+                    failed_pkgs.append(line.split(".")[0])
+            if failed_pkgs:
+                failed_postinsts_warn(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
         except subprocess.CalledProcessError as e:
             (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. "
                                               "Command '%s' returned %d:\n%s" %
@@ -1170,10 +1348,10 @@
                 # is separated from the following entry
                 status.write("\n")
 
-    '''
-    The following function dummy installs pkgs and returns the log of output.
-    '''
     def dummy_install(self, pkgs):
+        """
+        The following function dummy installs pkgs and returns the log of output.
+        """
         if len(pkgs) == 0:
             return
 
@@ -1228,10 +1406,10 @@
                             self.opkg_dir,
                             symlinks=True)
 
-    """
-    Returns a dictionary with the package info.
-    """
     def package_info(self, pkg):
+        """
+        Returns a dictionary with the package info.
+        """
         cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg)
         pkg_info = super(OpkgPM, self).package_info(pkg, cmd)
 
@@ -1242,12 +1420,12 @@
 
         return pkg_info
 
-    """
-    Returns the path to a tmpdir where resides the contents of a package.
-
-    Deleting the tmpdir is responsability of the caller.
-    """
     def extract(self, pkg):
+        """
+        Returns the path to a tmpdir where the contents of a package reside.
+
+        Deleting the tmpdir is the responsibility of the caller.
+        """
         pkg_info = self.package_info(pkg)
         if not pkg_info:
             bb.fatal("Unable to get information for package '%s' while "
@@ -1260,8 +1438,7 @@
 
 class DpkgPM(OpkgDpkgPM):
     def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
-        super(DpkgPM, self).__init__(d)
-        self.target_rootfs = target_rootfs
+        super(DpkgPM, self).__init__(d, target_rootfs)
         self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
         if apt_conf_dir is None:
             self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
@@ -1281,12 +1458,12 @@
 
         self.indexer = DpkgIndexer(self.d, self.deploy_dir)
 
-    """
-    This function will change a package's status in /var/lib/dpkg/status file.
-    If 'packages' is None then the new_status will be applied to all
-    packages
-    """
     def mark_packages(self, status_tag, packages=None):
+        """
+        This function will change a package's status in /var/lib/dpkg/status file.
+        If 'packages' is None then the new_status will be applied to all
+        packages
+        """
         status_file = self.target_rootfs + "/var/lib/dpkg/status"
 
         with open(status_file, "r") as sf:
@@ -1309,11 +1486,11 @@
 
         os.rename(status_file + ".tmp", status_file)
 
-    """
-    Run the pre/post installs for package "package_name". If package_name is
-    None, then run all pre/post install scriptlets.
-    """
     def run_pre_post_installs(self, package_name=None):
+        """
+        Run the pre/post installs for package "package_name". If package_name is
+        None, then run all pre/post install scriptlets.
+        """
         info_dir = self.target_rootfs + "/var/lib/dpkg/info"
         ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
         control_scripts = [
@@ -1335,8 +1512,7 @@
         os.environ['OFFLINE_ROOT'] = self.target_rootfs
         os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
         os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
-        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
-                                                   "intercept_scripts")
+        os.environ['INTERCEPT_DIR'] = self.intercepts_dir
         os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
 
         failed_pkgs = []
@@ -1351,9 +1527,10 @@
                                 stderr=subprocess.STDOUT).decode("utf-8")
                         bb.note(output)
                     except subprocess.CalledProcessError as e:
-                        bb.note("%s for package %s failed with %d:\n%s" %
+                        bb.warn("%s for package %s failed with %d:\n%s" %
                                 (control_script.name, pkg_name, e.returncode,
                                     e.output.decode("utf-8")))
+                        failed_postinsts_warn([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}"))
                         failed_pkgs.append(pkg_name)
                         break
 
@@ -1558,10 +1735,10 @@
     def list_installed(self):
         return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs()
 
-    """
-    Returns a dictionary with the package info.
-    """
     def package_info(self, pkg):
+        """
+        Returns a dictionary with the package info.
+        """
         cmd = "%s show %s" % (self.apt_cache_cmd, pkg)
         pkg_info = super(DpkgPM, self).package_info(pkg, cmd)
 
@@ -1572,12 +1749,12 @@
 
         return pkg_info
 
-    """
-    Returns the path to a tmpdir where resides the contents of a package.
-
-    Deleting the tmpdir is responsability of the caller.
-    """
     def extract(self, pkg):
+        """
+        Returns the path to a tmpdir where the contents of a package reside.
+
+        Deleting the tmpdir is the responsibility of the caller.
+        """
         pkg_info = self.package_info(pkg)
         if not pkg_info:
             bb.fatal("Unable to get information for package '%s' while "
@@ -1592,7 +1769,7 @@
     classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
 
     indexer_map = {
-        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM')),
+        "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
         "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
         "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
     }
@@ -1608,12 +1785,3 @@
 
             if result is not None:
                 bb.fatal(result)
-
-if __name__ == "__main__":
-    """
-    We should be able to run this as a standalone script, from outside bitbake
-    environment.
-    """
-    """
-    TBD
-    """
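
Two mechanisms in this file are worth restating. First, _initialize_intercepts() isolates the postinstall intercept scripts per PackageManager instance by hashing the target rootfs path into the directory name, so concurrent instances (rootfs, SDK host and target) don't trample each other. Second, run_intercepts() executes each script and, on failure, defers the registered packages' postinstalls to first boot. A condensed sketch of the directory scheme, under the same assumptions as the hunk:

    import hashlib
    import os

    def intercepts_dir_for(workdir, target_rootfs):
        # One directory per rootfs: the sha256 of the rootfs path keeps
        # concurrent PackageManager instances from sharing (and corrupting)
        # the same intercept_scripts directory.
        digest = hashlib.sha256(target_rootfs.encode()).hexdigest()
        return os.path.join(workdir, 'intercept_scripts-%s' % digest)

    # The paths below are hypothetical.
    print(intercepts_dir_for('/work', '/work/rootfs'))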
diff --git a/import-layers/yocto-poky/meta/lib/oe/patch.py b/import-layers/yocto-poky/meta/lib/oe/patch.py
index 584bf6c..af7aa52 100644
--- a/import-layers/yocto-poky/meta/lib/oe/patch.py
+++ b/import-layers/yocto-poky/meta/lib/oe/patch.py
@@ -36,6 +36,22 @@
         (exitstatus, output) = oe.utils.getstatusoutput(cmd)
         if exitstatus != 0:
             raise CmdError(cmd, exitstatus >> 8, output)
+        if " fuzz " in output:
+            bb.warn("""
+Some of the context lines in patches were ignored. This can lead to incorrectly applied patches.
+The context lines in the patches can be updated with devtool:
+
+    devtool modify <recipe>
+    devtool finish --force-patch-refresh <recipe> <layer_path>
+
+Then the updated patches and the source tree (in devtool's workspace)
+should be reviewed to make sure the patches apply in the correct place
+and don't introduce duplicate lines (which can, and does happen
+when some of the context is ignored). Further information:
+http://lists.openembedded.org/pipermail/openembedded-core/2018-March/148675.html
+https://bugzilla.yoctoproject.org/show_bug.cgi?id=10450
+Details:
+{}""".format(output))
         return output
 
     finally:
@@ -212,7 +228,7 @@
         self.patches.insert(i, patch)
 
     def _applypatch(self, patch, force = False, reverse = False, run = True):
-        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+        shellcmd = ["cat", patch['file'], "|", "patch", "--no-backup-if-mismatch", "-p", patch['strippath']]
         if reverse:
             shellcmd.append('-R')
 
@@ -432,7 +448,7 @@
         import re
         tempdir = tempfile.mkdtemp(prefix='oepatch')
         try:
-            shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+            shellcmd = ["git", "format-patch", "--no-signature", "--no-numbered", startcommit, "-o", tempdir]
             if paths:
                 shellcmd.append('--')
                 shellcmd.extend(paths)
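
The fuzz warning added above keys off the literal ' fuzz ' substring because GNU patch reports fuzzy application with messages of the form 'Hunk #1 succeeded at 120 with fuzz 2 (offset 3 lines).'. A minimal sketch of the check (the sample output line is illustrative):

    output = 'Hunk #1 succeeded at 120 with fuzz 2 (offset 3 lines).'
    if ' fuzz ' in output:
        print('warning: some context lines were ignored; review the patch')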
diff --git a/import-layers/yocto-poky/meta/lib/oe/path.py b/import-layers/yocto-poky/meta/lib/oe/path.py
index 1ea03d5..76c58fa 100644
--- a/import-layers/yocto-poky/meta/lib/oe/path.py
+++ b/import-layers/yocto-poky/meta/lib/oe/path.py
@@ -237,3 +237,25 @@
         raise
 
     return file
+
+def is_path_parent(possible_parent, *paths):
+    """
+    Return True if a path is the parent of another, False otherwise.
+    Multiple paths to test can be specified in which case all
+    specified test paths must be under the parent in order to
+    return True.
+    """
+    def abs_path_trailing(pth):
+        pth_abs = os.path.abspath(pth)
+        if not pth_abs.endswith(os.sep):
+            pth_abs += os.sep
+        return pth_abs
+
+    possible_parent_abs = abs_path_trailing(possible_parent)
+    if not paths:
+        return False
+    for path in paths:
+        path_abs = abs_path_trailing(path)
+        if not path_abs.startswith(possible_parent_abs):
+            return False
+    return True
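
is_path_parent() appends a trailing separator before the prefix comparison so that whole path components are compared; a plain string prefix test would wrongly treat /usr/lib as a parent of /usr/libexec. A small demonstration of that normalization:

    import os

    def abs_path_trailing(pth):
        # Same helper as in is_path_parent() above: absolute path with a
        # guaranteed trailing separator so prefix tests align on components.
        pth_abs = os.path.abspath(pth)
        return pth_abs if pth_abs.endswith(os.sep) else pth_abs + os.sep

    print('/usr/libexec/'.startswith('/usr/lib'))                     # True (wrong)
    print('/usr/libexec/'.startswith(abs_path_trailing('/usr/lib')))  # False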
diff --git a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
index cab8e40..aa64553 100644
--- a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
@@ -22,7 +22,7 @@
 # Help us to find places to insert values
 recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LICENSE_FLAGS', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRCPV', 'SRC_URI', 'S', 'do_fetch()', 'do_unpack()', 'do_patch()', 'EXTRA_OECONF', 'EXTRA_OECMAKE', 'EXTRA_OESCONS', 'do_configure()', 'EXTRA_OEMAKE', 'do_compile()', 'do_install()', 'do_populate_sysroot()', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'populate_packages()', 'do_package()', 'do_deploy()']
 # Variables that sometimes are a bit long but shouldn't be wrapped
-nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]']
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER', r'SRC_URI\[(.+\.)?md5sum\]', r'SRC_URI\[(.+\.)?sha256sum\]']
 list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
 meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
 
@@ -142,6 +142,10 @@
     else:
         newline = ''
 
+    nowrap_vars_res = []
+    for item in nowrap_vars:
+        nowrap_vars_res.append(re.compile('^%s$' % item))
+
     recipe_progression_res = []
     recipe_progression_restrs = []
     for item in recipe_progression:
@@ -174,7 +178,12 @@
             return
         rawtext = '%s = "%s"%s' % (name, values[name], newline)
         addlines = []
-        if name in nowrap_vars:
+        nowrap = False
+        for nowrap_re in nowrap_vars_res:
+            if nowrap_re.match(name):
+                nowrap = True
+                break
+        if nowrap:
             addlines.append(rawtext)
         elif name in list_vars:
             splitvalue = split_var_value(values[name], assignment=False)
@@ -242,7 +251,7 @@
     return changed, tolines
 
 
-def patch_recipe_file(fn, values, patch=False, relpath=''):
+def patch_recipe_file(fn, values, patch=False, relpath='', redirect_output=None):
     """Update or insert variable values into a recipe file (assuming you
        have already identified the exact file you want to update.)
        Note that some manual inspection/intervention may be required
@@ -254,7 +263,11 @@
 
     _, tolines = patch_recipe_lines(fromlines, values)
 
-    if patch:
+    if redirect_output:
+        with open(os.path.join(redirect_output, os.path.basename(fn)), 'w') as f:
+            f.writelines(tolines)
+        return None
+    elif patch:
         relfn = os.path.relpath(fn, relpath)
         diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
         return diff
@@ -304,7 +317,7 @@
 
     return filevars
 
-def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+def patch_recipe(d, fn, varvalues, patch=False, relpath='', redirect_output=None):
     """Modify a list of variable values in the specified recipe. Handles inc files if
     used by the recipe.
     """
@@ -314,7 +327,7 @@
     patches = []
     for f,v in locs.items():
         vals = {k: varvalues[k] for k in v}
-        patchdata = patch_recipe_file(f, vals, patch, relpath)
+        patchdata = patch_recipe_file(f, vals, patch, relpath, redirect_output)
         if patch:
             patches.append(patchdata)
 
@@ -395,7 +408,7 @@
     # fetcher) though note that this only encompasses actual container formats
     # i.e. that can contain multiple files as opposed to those that only
     # contain a compressed stream (i.e. .tar.gz as opposed to just .gz)
-    archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
+    archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.txz', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
     ret = {}
     for uri in uris:
         if fetch.ud[uri].type == 'file':
@@ -575,7 +588,7 @@
     return (appendpath, pathok)
 
 
-def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None, redirect_output=None):
     """
     Writes a bbappend file for a recipe
     Parameters:
@@ -602,6 +615,9 @@
             value pairs, or simply a list of the lines.
         removevalues:
             Variable values to remove - a dict of names/values.
+        redirect_output:
+            If specified, redirects writing the output file to the
+            specified directory (for dry-run purposes)
     """
 
     if not removevalues:
@@ -616,7 +632,8 @@
         bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
 
     appenddir = os.path.dirname(appendpath)
-    bb.utils.mkdirhier(appenddir)
+    if not redirect_output:
+        bb.utils.mkdirhier(appenddir)
 
     # FIXME check if the bbappend doesn't get overridden by a higher priority layer?
 
@@ -693,9 +710,18 @@
         if instfunclines:
             bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
 
-    bb.note('Writing append file %s' % appendpath)
+    if redirect_output:
+        bb.note('Writing append file %s (dry-run)' % appendpath)
+        outfile = os.path.join(redirect_output, os.path.basename(appendpath))
+        # Only take a copy if the file isn't already there (this function may be called
+        # multiple times per operation when we're handling overrides)
+        if os.path.exists(appendpath) and not os.path.exists(outfile):
+            shutil.copy2(appendpath, outfile)
+    else:
+        bb.note('Writing append file %s' % appendpath)
+        outfile = appendpath
 
-    if os.path.exists(appendpath):
+    if os.path.exists(outfile):
         # Work around lack of nonlocal in python 2
         extvars = {'destsubdir': destsubdir}
 
@@ -767,7 +793,7 @@
         if removevalues:
             varnames.extend(list(removevalues.keys()))
 
-        with open(appendpath, 'r') as f:
+        with open(outfile, 'r') as f:
             (updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
 
         destsubdir = extvars['destsubdir']
@@ -784,20 +810,27 @@
         updated = True
 
     if updated:
-        with open(appendpath, 'w') as f:
+        with open(outfile, 'w') as f:
             f.writelines(newlines)
 
     if copyfiles:
         if machine:
             destsubdir = os.path.join(destsubdir, machine)
+        if redirect_output:
+            outdir = redirect_output
+        else:
+            outdir = appenddir
         for newfile, srcfile in copyfiles.items():
-            filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+            filedest = os.path.join(outdir, destsubdir, os.path.basename(srcfile))
             if os.path.abspath(newfile) != os.path.abspath(filedest):
                 if newfile.startswith(tempfile.gettempdir()):
                     newfiledisp = os.path.basename(newfile)
                 else:
                     newfiledisp = newfile
-                bb.note('Copying %s to %s' % (newfiledisp, filedest))
+                if redirect_output:
+                    bb.note('Copying %s to %s (dry-run)' % (newfiledisp, os.path.join(appenddir, destsubdir, os.path.basename(srcfile))))
+                else:
+                    bb.note('Copying %s to %s' % (newfiledisp, filedest))
                 bb.utils.mkdirhier(os.path.dirname(filedest))
                 shutil.copyfile(newfile, filedest)
 
@@ -867,25 +900,25 @@
             FetchError when don't have network access or upstream site don't response.
             NoMethodError when uri latest_versionstring method isn't implemented.
 
-        Returns a dictonary with version, type and datetime.
+        Returns a dictionary with version, repository revision, current_version, type and datetime.
         Type can be A for Automatic, M for Manual and U for Unknown.
     """
     from bb.fetch2 import decodeurl
     from datetime import datetime
 
     ru = {}
+    ru['current_version'] = rd.getVar('PV')
     ru['version'] = ''
     ru['type'] = 'U'
     ru['datetime'] = ''
-
-    pv = rd.getVar('PV')
+    ru['revision'] = ''
 
     # XXX: If don't have SRC_URI means that don't have upstream sources so
     # returns the current recipe version, so that upstream version check
     # declares a match.
     src_uris = rd.getVar('SRC_URI')
     if not src_uris:
-        ru['version'] = pv
+        ru['version'] = ru['current_version']
         ru['type'] = 'M'
         ru['datetime'] = datetime.now()
         return ru
@@ -894,6 +927,9 @@
     src_uri = src_uris.split()[0]
     uri_type, _, _, _, _, _ =  decodeurl(src_uri)
 
+    (pv, pfx, sfx) = get_recipe_pv_without_srcpv(rd.getVar('PV'), uri_type)
+    ru['current_version'] = pv
+
     manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
     if manual_upstream_version:
         # manual tracking of upstream version.
@@ -914,33 +950,22 @@
         ru['datetime'] = datetime.now()
     else:
         ud = bb.fetch2.FetchData(src_uri, rd)
-        pupver = ud.method.latest_versionstring(ud, rd)
-        (upversion, revision) = pupver
-
-        # format git version version+gitAUTOINC+HASH
-        if uri_type == 'git':
-            (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
-
-            # if contains revision but not upversion use current pv
-            if upversion == '' and revision:
-                upversion = pv
-
-            if upversion:
-                tmp = upversion
-                upversion = ''
-
-                if pfx:
-                    upversion = pfx + tmp
-                else:
-                    upversion = tmp
-
-                if sfx:
-                    upversion = upversion + sfx + revision[:10]
+        if rd.getVar("UPSTREAM_CHECK_COMMITS") == "1":
+            revision = ud.method.latest_revision(ud, rd, 'default')
+            upversion = pv
+            if revision != rd.getVar("SRCREV"):
+                upversion = upversion + "-new-commits-available"
+        else:
+            pupver = ud.method.latest_versionstring(ud, rd)
+            (upversion, revision) = pupver
 
         if upversion:
             ru['version'] = upversion
             ru['type'] = 'A'
 
+        if revision:
+            ru['revision'] = revision
+
         ru['datetime'] = datetime.now()
 
     return ru
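
The rewritten upstream check takes one of two routes: with UPSTREAM_CHECK_COMMITS set it compares the fetcher's latest revision against SRCREV and reuses the current PV, flagging new commits in the version string; otherwise it falls back to latest_versionstring(). A condensed sketch, where the two fetch_* callables are hypothetical stand-ins for the ud.method calls:

    def check_upstream(pv, srcrev, check_commits,
                       fetch_latest_revision, fetch_latest_version):
        if check_commits:
            # Corresponds to UPSTREAM_CHECK_COMMITS == "1" in the hunk above.
            revision = fetch_latest_revision()
            upversion = pv
            if revision != srcrev:
                upversion += '-new-commits-available'
        else:
            upversion, revision = fetch_latest_version()
        return upversion, revision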
diff --git a/import-layers/yocto-poky/meta/lib/oe/rootfs.py b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
index 754ef56..f8f717c 100644
--- a/import-layers/yocto-poky/meta/lib/oe/rootfs.py
+++ b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
@@ -92,10 +92,6 @@
                 self.d.getVar('PACKAGE_FEED_ARCHS'))
 
 
-    @abstractmethod
-    def _handle_intercept_failure(self, failed_script):
-        pass
-
     """
     The _cleanup() method should be used to clean-up stuff that we don't really
     want to end up on target. For example, in the case of RPM, the DB locks.
@@ -178,20 +174,10 @@
         post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
         rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
 
-        postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
-        if not postinst_intercepts_dir:
-            postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
-        intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
-                                      "intercept_scripts")
-
-        bb.utils.remove(intercepts_dir, True)
-
         bb.utils.mkdirhier(self.image_rootfs)
 
         bb.utils.mkdirhier(self.deploydir)
 
-        shutil.copytree(postinst_intercepts_dir, intercepts_dir)
-
         execute_pre_post_process(self.d, pre_process_cmds)
 
         if self.progress_reporter:
@@ -207,7 +193,7 @@
 
         execute_pre_post_process(self.d, rootfs_post_install_cmds)
 
-        self._run_intercepts()
+        self.pm.run_intercepts()
 
         execute_pre_post_process(self.d, post_process_cmds)
 
@@ -293,44 +279,6 @@
             # Remove the package manager data files
             self.pm.remove_packaging_data()
 
-    def _run_intercepts(self):
-        intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
-                                      "intercept_scripts")
-
-        bb.note("Running intercept scripts:")
-        os.environ['D'] = self.image_rootfs
-        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
-        for script in os.listdir(intercepts_dir):
-            script_full = os.path.join(intercepts_dir, script)
-
-            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
-                continue
-
-            bb.note("> Executing %s intercept ..." % script)
-
-            try:
-                output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
-                if output: bb.note(output.decode("utf-8"))
-            except subprocess.CalledProcessError as e:
-                bb.warn("The postinstall intercept hook '%s' failed, details in log.do_rootfs" % script)
-                bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
-
-                with open(script_full) as intercept:
-                    registered_pkgs = None
-                    for line in intercept.read().split("\n"):
-                        m = re.match("^##PKGS:(.*)", line)
-                        if m is not None:
-                            registered_pkgs = m.group(1).strip()
-                            break
-
-                    if registered_pkgs is not None:
-                        bb.warn("The postinstalls for the following packages "
-                                "will be postponed for first boot: %s" %
-                                registered_pkgs)
-
-                        # call the backend dependent handler
-                        self._handle_intercept_failure(registered_pkgs)
-
     def _run_ldconfig(self):
         if self.d.getVar('LDCONFIGDEPEND'):
             bb.note("Executing: ldconfig -r" + self.image_rootfs + "-c new -v")
@@ -523,14 +471,6 @@
         self._log_check_warn()
         self._log_check_error()
 
-    def _handle_intercept_failure(self, registered_pkgs):
-        rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
-        bb.utils.mkdirhier(rpm_postinsts_dir)
-
-        # Save the package postinstalls in /etc/rpm-postinsts
-        for pkg in registered_pkgs.split():
-            self.pm.save_rpmpostinst(pkg)
-
     def _cleanup(self):
         self.pm._invoke_dnf(["clean", "all"])
 
@@ -711,9 +651,6 @@
         src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
         return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
 
-    def _handle_intercept_failure(self, registered_pkgs):
-        self.pm.mark_packages("unpacked", registered_pkgs.split())
-
     def _log_check(self):
         self._log_check_warn()
         self._log_check_error()
@@ -982,9 +919,6 @@
         src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
         return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
 
-    def _handle_intercept_failure(self, registered_pkgs):
-        self.pm.mark_packages("unpacked", registered_pkgs.split())
-
     def _log_check(self):
         self._log_check_warn()
         self._log_check_error()
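
For orientation: the intercept machinery deleted from the Rootfs base class above is not dropped, it moves behind the package-manager abstraction so rootfs and SDK population share one implementation (hence the `self.pm.run_intercepts()` call earlier in this hunk). A minimal sketch of the relocated logic, condensed from the removed `_run_intercepts()` code; class and attribute names here are assumptions, not the exact oe.package_manager API:

    # Sketch only -- shows where the intercept logic now lives; names like
    # PackageManager/target_rootfs are illustrative, not the real API.
    import os
    import subprocess
    import bb

    class PackageManager:
        def run_intercepts(self):
            """Run each executable postinst-intercept script against the rootfs."""
            intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
                                          "intercept_scripts")
            os.environ['D'] = self.target_rootfs
            for script in sorted(os.listdir(intercepts_dir)):
                script_full = os.path.join(intercepts_dir, script)
                if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                    continue
                try:
                    subprocess.check_output(script_full, stderr=subprocess.STDOUT)
                except subprocess.CalledProcessError as e:
                    bb.warn("Intercept %s failed (exit %d); postponing postinsts "
                            "to first boot" % (script, e.returncode))
                    # Backend-specific follow-up, e.g. the dpkg/opkg-style
                    # mark_packages("unpacked", pkgs) seen in the removed
                    # _handle_intercept_failure() implementations above.
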
diff --git a/import-layers/yocto-poky/meta/lib/oe/sdk.py b/import-layers/yocto-poky/meta/lib/oe/sdk.py
index 7f71cfb..d6a5033 100644
--- a/import-layers/yocto-poky/meta/lib/oe/sdk.py
+++ b/import-layers/yocto-poky/meta/lib/oe/sdk.py
@@ -162,40 +162,21 @@
         self.host_manifest = RpmManifest(d, self.manifest_dir,
                                          Manifest.MANIFEST_TYPE_SDK_HOST)
 
-        target_providename = ['/bin/sh',
-                              '/bin/bash',
-                              '/usr/bin/env',
-                              '/usr/bin/perl',
-                              'pkgconfig'
-                              ]
-
         rpm_repo_workdir = "oe-sdk-repo"
         if "sdk_ext" in d.getVar("BB_RUNTASK"):
             rpm_repo_workdir = "oe-sdk-ext-repo"
 
-
         self.target_pm = RpmPM(d,
                                self.sdk_target_sysroot,
                                self.d.getVar('TARGET_VENDOR'),
                                'target',
-                               target_providename,
                                rpm_repo_workdir=rpm_repo_workdir
                                )
 
-        sdk_providename = ['/bin/sh',
-                           '/bin/bash',
-                           '/usr/bin/env',
-                           '/usr/bin/perl',
-                           'pkgconfig',
-                           'libGL.so()(64bit)',
-                           'libGL.so'
-                           ]
-
         self.host_pm = RpmPM(d,
                              self.sdk_host_sysroot,
                              self.d.getVar('SDK_VENDOR'),
                              'host',
-                             sdk_providename,
                              "SDK_PACKAGE_ARCHS",
                              "SDK_OS",
                              rpm_repo_workdir=rpm_repo_workdir
@@ -228,6 +209,8 @@
 
         self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
+        self.target_pm.run_intercepts()
+
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -237,6 +220,8 @@
         self._populate_sysroot(self.host_pm, self.host_manifest)
         self.install_locales(self.host_pm)
 
+        self.host_pm.run_intercepts()
+
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -312,6 +297,8 @@
 
         self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
+        self.target_pm.run_intercepts()
+
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -321,6 +308,8 @@
         self._populate_sysroot(self.host_pm, self.host_manifest)
         self.install_locales(self.host_pm)
 
+        self.host_pm.run_intercepts()
+
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
@@ -397,6 +386,8 @@
 
         self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
+        self.target_pm.run_intercepts()
+
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
         self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
@@ -408,6 +399,8 @@
         self._populate_sysroot(self.host_pm, self.host_manifest)
         self.install_locales(self.host_pm)
 
+        self.host_pm.run_intercepts()
+
         execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
         self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
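
Every SDK backend now runs the intercepts at the same two points. A condensed view of the ordering the hunks above add to each Sdk subclass (method-body excerpt for illustration; see the real code in the hunks):

    # Condensed pattern each Sdk subclass (Rpm/Opkg/Dpkg) now follows.
    def _populate(self):
        self._populate_sysroot(self.target_pm, self.target_manifest)
        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
        self.target_pm.run_intercepts()   # new: before target post-processing
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))

        self._populate_sysroot(self.host_pm, self.host_manifest)
        self.install_locales(self.host_pm)
        self.host_pm.run_intercepts()     # new: before host post-processing
        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
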
diff --git a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
index 3a8778e..b82e0f4 100644
--- a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
+++ b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
@@ -1,4 +1,5 @@
 import bb.siggen
+import oe
 
 def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
     # Return True if we should keep the dependency, False to drop it
@@ -28,15 +29,14 @@
             return False
         return True
 
-    # Quilt (patch application) changing isn't likely to affect anything
-    excludelist = ['quilt-native', 'subversion-native', 'git-native', 'ccache-native']
-    if depname in excludelist and recipename != depname:
-        return False
-
     # Exclude well defined recipe->dependency
     if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
         return False
 
+    # Check for a wildcard entry ("*->depname") that excludes this dependency for any recipe
+    if "*->%s" % depname in siggen.saferecipedeps and recipename != depname:
+        return False
+
     # Don't change native/cross/nativesdk recipe dependencies any further
     if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
         return True
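
The hardcoded native excludelist is replaced by a generic wildcard form in siggen.saferecipedeps, so the same exclusions can now be expressed in configuration (upstream populates saferecipedeps from SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, with entries such as "*->quilt-native"). A toy check demonstrating the matching rule this hunk adds (not OE code):

    # Toy demonstration of the new wildcard match.
    saferecipedeps = {"*->quilt-native", "gcc-runtime->gcc"}

    def is_excluded(recipename, depname):
        if "%s->%s" % (recipename, depname) in saferecipedeps:
            return True
        # New: '*' matches any recipe, except a recipe depending on itself.
        return "*->%s" % depname in saferecipedeps and recipename != depname

    assert is_excluded("busybox", "quilt-native")            # wildcard hit
    assert not is_excluded("quilt-native", "quilt-native")   # self-dep kept
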
@@ -368,3 +368,37 @@
     if extrainf:
         d2.setVar("SSTATE_MANMACH", extrainf)
     return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
+
+def find_sstate_manifest(taskdata, taskdata2, taskname, d, multilibcache):
+    """Return (manifest path, datastore) for the task's sstate manifest, or (None, datastore)."""
+    d2 = d
+    variant = ''
+    if taskdata2.startswith("virtual:multilib"):
+        variant = taskdata2.split(":")[2]
+        if variant not in multilibcache:
+            multilibcache[variant] = oe.utils.get_multilib_datastore(variant, d)
+        d2 = multilibcache[variant]
+
+    if taskdata.endswith("-native"):
+        pkgarchs = ["${BUILD_ARCH}"]
+    elif taskdata.startswith("nativesdk-"):
+        pkgarchs = ["${SDK_ARCH}_${SDK_OS}", "allarch"]
+    elif "-cross-canadian" in taskdata:
+        pkgarchs = ["${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}"]
+    elif "-cross-" in taskdata:
+        pkgarchs = ["${BUILD_ARCH}_${TARGET_ARCH}"]
+    elif "-crosssdk" in taskdata:
+        pkgarchs = ["${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"]
+    else:
+        pkgarchs = ['${MACHINE_ARCH}']
+        pkgarchs = pkgarchs + list(reversed(d2.getVar("PACKAGE_EXTRA_ARCHS").split()))
+        pkgarchs.append('allarch')
+        pkgarchs.append('${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX}')
+
+    for pkgarch in pkgarchs:
+        manifest = d2.expand("${SSTATE_MANIFESTS}/manifest-%s-%s.%s" % (pkgarch, taskdata, taskname))
+        if os.path.exists(manifest):
+            return manifest, d2
+    bb.warn("Manifest %s not found in %s (variant '%s')?" % (manifest, d2.expand(" ".join(pkgarchs)), variant))
+    return None, d2
+
+
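
The new find_sstate_manifest() helper resolves a recipe/task pair to its sstate manifest by trying candidate package architectures in order (native, nativesdk, cross-canadian, cross, crosssdk, then machine/extra/allarch) and returning the first ${SSTATE_MANIFESTS} path that exists. A hypothetical call, with illustrative argument values:

    # Hypothetical usage; taskdata2 carries any "virtual:multilib:..." prefix.
    multilibcache = {}
    manifest, d2 = find_sstate_manifest("zlib", "zlib", "populate_sysroot",
                                        d, multilibcache)
    if manifest:
        bb.note("sstate manifest: %s" % manifest)
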
diff --git a/import-layers/yocto-poky/meta/lib/oe/utils.py b/import-layers/yocto-poky/meta/lib/oe/utils.py
index 643ab78..80f0442 100644
--- a/import-layers/yocto-poky/meta/lib/oe/utils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/utils.py
@@ -86,17 +86,6 @@
     from re import match
     return " ".join([x for x in str.split() if not match(f, x, 0)])
 
-def param_bool(cfg, field, dflt = None):
-    """Lookup <field> in <cfg> map and convert it to a boolean; take
-    <dflt> when this <field> does not exist"""
-    value = cfg.get(field, dflt)
-    strvalue = str(value).lower()
-    if strvalue in ('yes', 'y', 'true', 't', '1'):
-        return True
-    elif strvalue in ('no', 'n', 'false', 'f', '0'):
-        return False
-    raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
-
 def build_depends_string(depends, task):
     """Append a taskname to a string of dependencies as used by the [depends] flag"""
     return " ".join(dep + ":" + task for dep in depends.split())
@@ -167,6 +156,49 @@
     """
     return bb.utils.contains_any("DISTRO_FEATURES", features, truevalue, falsevalue, d)
 
+def parallel_make(d):
+    """
+    Return the integer number of parallel threads to use when building,
+    parsed from PARALLEL_MAKE. If no parallelization option is found,
+    returns None.
+
+    e.g. if PARALLEL_MAKE = "-j 10", this will return 10 as an integer.
+    """
+    pm = (d.getVar('PARALLEL_MAKE') or '').split()
+    # look for '-j' and discard other options (e.g. '-l')
+    while pm:
+        opt = pm.pop(0)
+        if opt == '-j':
+            v = pm.pop(0)
+        elif opt.startswith('-j'):
+            v = opt[2:].strip()
+        else:
+            continue
+
+        return int(v)
+
+    return None
+
+def parallel_make_argument(d, fmt, limit=None):
+    """
+    Helper utility to construct a parallel make argument from the number of
+    parallel threads specified in PARALLEL_MAKE.
+
+    Returns the input format string `fmt` with a single '%d' expanded to the
+    number of parallel threads to use. If `limit` is specified, the thread
+    count is capped at `limit`. If no parallelization option is found in
+    PARALLEL_MAKE, returns an empty string.
+
+    e.g. if PARALLEL_MAKE = "-j 10", parallel_make_argument(d, "-n %d") will return
+    "-n 10"
+    """
+    v = parallel_make(d)
+    if v:
+        if limit:
+            v = min(limit, v)
+        return fmt % v
+    return ''
+
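
A quick feel for the two helpers just added, assuming PARALLEL_MAKE = "-j 10 -l 8" in the datastore d:

    import oe.utils
    oe.utils.parallel_make(d)                             # -> 10 ('-l 8' is skipped)
    oe.utils.parallel_make_argument(d, "-n %d")           # -> "-n 10"
    oe.utils.parallel_make_argument(d, "-n %d", limit=4)  # -> "-n 4"
    # With no '-j' option present, parallel_make() returns None and
    # parallel_make_argument() returns "".
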
 def packages_filter_out_system(d):
     """
     Return a list of packages from PACKAGES with the "system" packages such as
@@ -292,6 +324,14 @@
     version = match.group(1)
     return "-%s" % version if version in ("4.8", "4.9") else ""
 
+
+def get_multilib_datastore(variant, d):
+    """Return a copy of d with OVERRIDES and MLPREFIX set for the given multilib variant."""
+    localdata = bb.data.createCopy(d)
+    overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + variant
+    localdata.setVar("OVERRIDES", overrides)
+    localdata.setVar("MLPREFIX", variant + "-")
+    return localdata
+
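
get_multilib_datastore() gives callers (e.g. find_sstate_manifest() above) a datastore copy with the multilib variant's overrides applied. Illustrative use:

    # For variant "lib32": OVERRIDES gains ":virtclass-multilib-lib32" and
    # MLPREFIX becomes "lib32-", so expansions match the lib32 variant.
    localdata = oe.utils.get_multilib_datastore("lib32", d)
    localdata.getVar("MLPREFIX")   # -> "lib32-"
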
 #
 # Python 2.7 doesn't have threaded pools (just multiprocessing)
 # so implement a version here