Yocto 2.3

Move OpenBMC to Yocto 2.3 (pyro).

Tested: Built and verified Witherspoon and Palmetto images
Change-Id: I50744030e771f4850afc2a93a10d3507e76d36bc
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Resolves: openbmc/openbmc#2461
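
Most of the churn in this import is mechanical: Yocto 2.3 changed the BitBake
datastore API so that d.getVar() and d.getVarFlag() expand their result by
default, making the explicit True argument redundant. A minimal sketch of the
idiom change (variable name illustrative only):

    # before pyro: expansion had to be requested explicitly
    archs = d.getVar('PACKAGE_ARCHS', True)
    # pyro onward: expand defaults to True
    archs = d.getVar('PACKAGE_ARCHS')
    # the unexpanded value is still available on request
    raw = d.getVar('PACKAGE_ARCHS', False)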
diff --git a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
index b6c0265..3a5b7b6 100644
--- a/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
+++ b/import-layers/yocto-poky/meta/lib/oe/buildhistory_analysis.py
@@ -1,6 +1,6 @@
 # Report significant differences in the buildhistory repository since a specific revision
 #
-# Copyright (C) 2012 Intel Corporation
+# Copyright (C) 2012-2013, 2016-2017 Intel Corporation
 # Author: Paul Eggleton <paul.eggleton@linux.intel.com>
 #
 # Note: requires GitPython 0.3.1+
@@ -13,7 +13,10 @@
 import difflib
 import git
 import re
+import hashlib
+import collections
 import bb.utils
+import bb.tinfoil
 
 
 # How to display fields
@@ -69,7 +72,22 @@
                     pkglist.append(k)
             return pkglist
 
+        def detect_renamed_dirs(aitems, bitems):
+            adirs = set(map(os.path.dirname, aitems))
+            bdirs = set(map(os.path.dirname, bitems))
+            files_ab = [(name, sorted(os.path.basename(item) for item in aitems if os.path.dirname(item) == name)) \
+                                for name in adirs - bdirs]
+            files_ba = [(name, sorted(os.path.basename(item) for item in bitems if os.path.dirname(item) == name)) \
+                                for name in bdirs - adirs]
+            renamed_dirs = [(dir1, dir2) for dir1, files1 in files_ab for dir2, files2 in files_ba if files1 == files2]
+            # remove files that belong to renamed dirs from aitems and bitems
+            for dir1, dir2 in renamed_dirs:
+                aitems = [item for item in aitems if os.path.dirname(item) not in (dir1, dir2)]
+                bitems = [item for item in bitems if os.path.dirname(item) not in (dir1, dir2)]
+            return renamed_dirs, aitems, bitems
+
         if self.fieldname in list_fields or self.fieldname in list_order_fields:
+            renamed_dirs = []
             if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
                 (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
                 aitems = pkglist_combine(depvera)
@@ -77,16 +95,29 @@
             else:
                 aitems = self.oldvalue.split()
                 bitems = self.newvalue.split()
+                if self.fieldname == 'FILELIST':
+                    renamed_dirs, aitems, bitems = detect_renamed_dirs(aitems, bitems)
+
             removed = list(set(aitems) - set(bitems))
             added = list(set(bitems) - set(aitems))
 
+            lines = []
+            if renamed_dirs:
+                for dfrom, dto in renamed_dirs:
+                    lines.append('directory renamed %s -> %s' % (dfrom, dto))
             if removed or added:
                 if removed and not bitems:
-                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
+                    lines.append('removed all items "%s"' % ' '.join(removed))
                 else:
-                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
+                    if removed:
+                        lines.append('removed "%s"' % ' '.join(removed))
+                    if added:
+                        lines.append('added "%s"' % ' '.join(added))
             else:
-                out = '%s changed order' % self.fieldname
+                lines.append('changed order')
+
+            out = '%s: %s' % (self.fieldname, ', '.join(lines))
+
         elif self.fieldname in numeric_fields:
             aval = int(self.oldvalue or 0)
             bval = int(self.newvalue or 0)
@@ -382,13 +413,115 @@
     return changes
 
 
-def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+def compare_siglists(a_blob, b_blob, taskdiff=False):
+    # FIXME collapse down a recipe's tasks?
+    alines = a_blob.data_stream.read().decode('utf-8').splitlines()
+    blines = b_blob.data_stream.read().decode('utf-8').splitlines()
+    keys = []
+    pnmap = {}
+    def readsigs(lines):
+        sigs = {}
+        for line in lines:
+            linesplit = line.split()
+            if len(linesplit) > 2:
+                sigs[linesplit[0]] = linesplit[2]
+                if not linesplit[0] in keys:
+                    keys.append(linesplit[0])
+                pnmap[linesplit[1]] = linesplit[0].rsplit('.', 1)[0]
+        return sigs
+    adict = readsigs(alines)
+    bdict = readsigs(blines)
+    out = []
+
+    changecount = 0
+    addcount = 0
+    removecount = 0
+    if taskdiff:
+        with bb.tinfoil.Tinfoil() as tinfoil:
+            tinfoil.prepare(config_only=True)
+
+            changes = collections.OrderedDict()
+
+            def compare_hashfiles(pn, taskname, hash1, hash2):
+                hashes = [hash1, hash2]
+                hashfiles = bb.siggen.find_siginfo(pn, taskname, hashes, tinfoil.config_data)
+
+                if not taskname:
+                    (pn, taskname) = pn.rsplit('.', 1)
+                    pn = pnmap.get(pn, pn)
+                desc = '%s.%s' % (pn, taskname)
+
+                if len(hashfiles) == 0:
+                    out.append("Unable to find matching sigdata for %s with hashes %s or %s" % (desc, hash1, hash2))
+                elif not hash1 in hashfiles:
+                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash1))
+                elif not hash2 in hashfiles:
+                    out.append("Unable to find matching sigdata for %s with hash %s" % (desc, hash2))
+                else:
+                    out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, collapsed=True)
+                    for line in out2:
+                        m = hashlib.sha256()
+                        m.update(line.encode('utf-8'))
+                        entry = changes.get(m.hexdigest(), (line, []))
+                        if desc not in entry[1]:
+                            changes[m.hexdigest()] = (line, entry[1] + [desc])
+
+            # Define recursion callback
+            def recursecb(key, hash1, hash2):
+                compare_hashfiles(key, None, hash1, hash2)
+                return []
+
+            for key in keys:
+                siga = adict.get(key, None)
+                sigb = bdict.get(key, None)
+                if siga is not None and sigb is not None and siga != sigb:
+                    changecount += 1
+                    (pn, taskname) = key.rsplit('.', 1)
+                    compare_hashfiles(pn, taskname, siga, sigb)
+                elif siga is None:
+                    addcount += 1
+                elif sigb is None:
+                    removecount += 1
+        for key, item in changes.items():
+            line, tasks = item
+            if len(tasks) == 1:
+                desc = tasks[0]
+            elif len(tasks) == 2:
+                desc = '%s and %s' % (tasks[0], tasks[1])
+            else:
+                desc = '%s and %d others' % (tasks[-1], len(tasks)-1)
+            out.append('%s: %s' % (desc, line))
+    else:
+        for key in keys:
+            siga = adict.get(key, None)
+            sigb = bdict.get(key, None)
+            if siga is not None and sigb is not None and siga != sigb:
+                out.append('%s changed from %s to %s' % (key, siga, sigb))
+                changecount += 1
+            elif siga is None:
+                out.append('%s was added' % key)
+                addcount += 1
+            elif sigb is None:
+                out.append('%s was removed' % key)
+                removecount += 1
+    out.append('Summary: %d tasks added, %d tasks removed, %d tasks modified (%.1f%%)' % (addcount, removecount, changecount, (changecount / float(len(bdict)) * 100)))
+    return '\n'.join(out)
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False, sigs=False, sigsdiff=False):
     repo = git.Repo(repopath)
     assert repo.bare == False
     commit = repo.commit(revision1)
     diff = commit.diff(revision2)
 
     changes = []
+
+    if sigs or sigsdiff:
+        for d in diff.iter_change_type('M'):
+            if d.a_blob.path == 'siglist.txt':
+                changes.append(compare_siglists(d.a_blob, d.b_blob, taskdiff=sigsdiff))
+        return changes
+
     for d in diff.iter_change_type('M'):
         path = os.path.dirname(d.a_blob.path)
         if path.startswith('packages/'):
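
The FILELIST handling above collapses a wholesale directory rename into a
single "directory renamed" line instead of one removed/added entry per file.
A self-contained sketch of the matching logic, with hypothetical paths:

    import os

    def renamed_dirs(aitems, bitems):
        adirs = set(map(os.path.dirname, aitems))
        bdirs = set(map(os.path.dirname, bitems))
        # directories present on only one side, with their sorted file names
        files_ab = [(d, sorted(os.path.basename(i) for i in aitems
                               if os.path.dirname(i) == d)) for d in adirs - bdirs]
        files_ba = [(d, sorted(os.path.basename(i) for i in bitems
                               if os.path.dirname(i) == d)) for d in bdirs - adirs]
        # a rename is a pair of one-sided directories with identical contents
        return [(d1, d2) for d1, f1 in files_ab for d2, f2 in files_ba if f1 == f2]

    old = ['/usr/lib/foo/a.so', '/usr/lib/foo/b.so']
    new = ['/usr/lib/bar/a.so', '/usr/lib/bar/b.so']
    print(renamed_dirs(old, new))  # [('/usr/lib/foo', '/usr/lib/bar')]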
diff --git a/import-layers/yocto-poky/meta/lib/oe/classextend.py b/import-layers/yocto-poky/meta/lib/oe/classextend.py
index 4c8a000..d2eeaf0 100644
--- a/import-layers/yocto-poky/meta/lib/oe/classextend.py
+++ b/import-layers/yocto-poky/meta/lib/oe/classextend.py
@@ -25,7 +25,7 @@
         return name
 
     def map_variable(self, varname, setvar = True):
-        var = self.d.getVar(varname, True)
+        var = self.d.getVar(varname)
         if not var:
             return ""
         var = var.split()
@@ -38,7 +38,7 @@
         return newdata
 
     def map_regexp_variable(self, varname, setvar = True):
-        var = self.d.getVar(varname, True)
+        var = self.d.getVar(varname)
         if not var:
             return ""
         var = var.split()
@@ -60,7 +60,7 @@
             return dep
         else:
             # Do not extend for that already have multilib prefix
-            var = self.d.getVar("MULTILIB_VARIANTS", True)
+            var = self.d.getVar("MULTILIB_VARIANTS")
             if var:
                 var = var.split()
                 for v in var:
@@ -74,7 +74,7 @@
             varname = varname + "_" + suffix
         orig = self.d.getVar("EXTENDPKGV", False)
         self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
-        deps = self.d.getVar(varname, True)
+        deps = self.d.getVar(varname)
         if not deps:
             self.d.setVar("EXTENDPKGV", orig)
             return
@@ -87,7 +87,7 @@
         self.d.setVar("EXTENDPKGV", orig)
 
     def map_packagevars(self):
-        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
+        for pkg in (self.d.getVar("PACKAGES").split() + [""]):
             self.map_depends_variable("RDEPENDS", pkg)
             self.map_depends_variable("RRECOMMENDS", pkg)
             self.map_depends_variable("RSUGGESTS", pkg)
@@ -97,7 +97,7 @@
             self.map_depends_variable("PKG", pkg)
 
     def rename_packages(self):
-        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
+        for pkg in (self.d.getVar("PACKAGES") or "").split():
             if pkg.startswith(self.extname):
                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
                continue
diff --git a/import-layers/yocto-poky/meta/lib/oe/classutils.py b/import-layers/yocto-poky/meta/lib/oe/classutils.py
index e7856c8..45cd524 100644
--- a/import-layers/yocto-poky/meta/lib/oe/classutils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/classutils.py
@@ -36,7 +36,7 @@
     @classmethod
     def prioritized(tcls):
         return sorted(list(tcls.registry.values()),
-                      key=lambda v: v.priority, reverse=True)
+                      key=lambda v: (v.priority, v.name), reverse=True)
 
     def unregister(cls):
         for key in cls.registry.keys():
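
The one-line classutils change adds the class name as a secondary sort key,
so prioritized() returns a deterministic order when two registered classes
share a priority, instead of depending on registry insertion order. The
effect on hypothetical (name, priority) pairs:

    entries = [('wic', 10), ('ext4', 10), ('tar', 5)]
    print(sorted(entries, key=lambda v: (v[1], v[0]), reverse=True))
    # [('wic', 10), ('ext4', 10), ('tar', 5)] -- ties broken by name, every run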
diff --git a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
index 29ac6d4..a372904 100644
--- a/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
+++ b/import-layers/yocto-poky/meta/lib/oe/copy_buildsystem.py
@@ -21,8 +21,8 @@
     def __init__(self, context, d):
         self.d = d
         self.context = context
-        self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS', True).split()]
-        self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE', True) or "").split()
+        self.layerdirs = [os.path.abspath(pth) for pth in d.getVar('BBLAYERS').split()]
+        self.layers_exclude = (d.getVar('SDK_LAYERS_EXCLUDE') or "").split()
 
     def copy_bitbake_and_layers(self, destdir, workspace_name=None):
         # Copy in all metadata layers + bitbake (as repositories)
@@ -30,7 +30,7 @@
         bb.utils.mkdirhier(destdir)
         layers = list(self.layerdirs)
 
-        corebase = os.path.abspath(self.d.getVar('COREBASE', True))
+        corebase = os.path.abspath(self.d.getVar('COREBASE'))
         layers.append(corebase)
 
         # Exclude layers
@@ -46,7 +46,7 @@
                 extranum += 1
                 workspace_newname = '%s-%d' % (workspace_name, extranum)
 
-        corebase_files = self.d.getVar('COREBASE_FILES', True).split()
+        corebase_files = self.d.getVar('COREBASE_FILES').split()
         corebase_files = [corebase + '/' +x for x in corebase_files]
         # Make sure bitbake goes in
         bitbake_dir = bb.__file__.rsplit('/', 3)[0]
@@ -100,7 +100,7 @@
                 # Drop all bbappends except the one for the image the SDK is being built for
                 # (because of externalsrc, the workspace bbappends will interfere with the
                 # locked signatures if present, and we don't need them anyway)
-                image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE', True)))[0] + '.bbappend'
+                image_bbappend = os.path.splitext(os.path.basename(self.d.getVar('FILE')))[0] + '.bbappend'
                 appenddir = os.path.join(layerdestpath, 'appends')
                 if os.path.isdir(appenddir):
                     for fn in os.listdir(appenddir):
@@ -208,7 +208,7 @@
     import shutil
     bb.note('Generating sstate-cache...')
 
-    nativelsbstring = d.getVar('NATIVELSBSTRING', True)
+    nativelsbstring = d.getVar('NATIVELSBSTRING')
     bb.process.run("gen-lockedsig-cache %s %s %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache, nativelsbstring, filterfile or ''))
     if fixedlsbstring and nativelsbstring != fixedlsbstring:
         nativedir = output_sstate_cache + '/' + nativelsbstring
diff --git a/import-layers/yocto-poky/meta/lib/oe/data.py b/import-layers/yocto-poky/meta/lib/oe/data.py
index ee48950..b8901e6 100644
--- a/import-layers/yocto-poky/meta/lib/oe/data.py
+++ b/import-layers/yocto-poky/meta/lib/oe/data.py
@@ -1,9 +1,10 @@
+import json
 import oe.maketype
 
 def typed_value(key, d):
     """Construct a value for the specified metadata variable, using its flags
     to determine the type and parameters for construction."""
-    var_type = d.getVarFlag(key, 'type', True)
+    var_type = d.getVarFlag(key, 'type')
     flags = d.getVarFlags(key)
     if flags is not None:
         flags = dict((flag, d.expand(value))
@@ -12,6 +13,35 @@
         flags = {}
 
     try:
-        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
+        return oe.maketype.create(d.getVar(key) or '', var_type, **flags)
     except (TypeError, ValueError) as exc:
         bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
+
+def export2json(d, json_file, expand=True, searchString="", replaceString=""):
+    data2export = {}
+    keys2export = []
+
+    for key in d.keys():
+        if key.startswith("_"):
+            continue
+        elif key.startswith("BB"):
+            continue
+        elif key.startswith("B_pn"):
+            continue
+        elif key.startswith("do_"):
+            continue
+        elif d.getVarFlag(key, "func"):
+            continue
+
+        keys2export.append(key)
+
+    for key in keys2export:
+        try:
+            data2export[key] = d.getVar(key, expand).replace(searchString, replaceString)
+        except bb.data_smart.ExpansionError:
+            data2export[key] = ''
+        except AttributeError:
+            pass
+
+    with open(json_file, "w") as f:
+        json.dump(data2export, f, skipkeys=True, indent=4, sort_keys=True)
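
export2json(), added above, dumps every non-internal, non-function datastore
variable to a JSON file, optionally rewriting a substring in each value. A
hedged usage sketch from a task context (paths hypothetical, assuming a
datastore d is in scope):

    import oe.data

    # serialize the expanded datastore, masking the absolute build directory
    oe.data.export2json(d, '/tmp/bb-env.json',
                        searchString='/home/builder/build',
                        replaceString='${TOPDIR}')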
diff --git a/import-layers/yocto-poky/meta/lib/oe/distro_check.py b/import-layers/yocto-poky/meta/lib/oe/distro_check.py
index 87c52fa..37f04ed 100644
--- a/import-layers/yocto-poky/meta/lib/oe/distro_check.py
+++ b/import-layers/yocto-poky/meta/lib/oe/distro_check.py
@@ -1,32 +1,17 @@
-from contextlib import contextmanager
-
-from bb.utils import export_proxies
-
 def create_socket(url, d):
     import urllib
+    from bb.utils import export_proxies
 
-    socket = None
-    try:
-        export_proxies(d)
-        socket = urllib.request.urlopen(url)
-    except:
-        bb.warn("distro_check: create_socket url %s can't access" % url)
-
-    return socket
+    export_proxies(d)
+    return urllib.request.urlopen(url)
 
 def get_links_from_url(url, d):
     "Return all the href links found on the web location"
 
     from bs4 import BeautifulSoup, SoupStrainer
 
+    soup = BeautifulSoup(create_socket(url,d), "html.parser", parse_only=SoupStrainer("a"))
     hyperlinks = []
-
-    webpage = ''
-    sock = create_socket(url,d)
-    if sock:
-        webpage = sock.read()
-
-    soup = BeautifulSoup(webpage, "html.parser", parse_only=SoupStrainer("a"))
     for line in soup.find_all('a', href=True):
         hyperlinks.append(line['href'].strip('/'))
     return hyperlinks
@@ -37,6 +22,7 @@
     maxstr=""
     for link in get_links_from_url(url, d):
         try:
+            # TODO use LooseVersion
             release = float(link)
         except:
             release = 0
@@ -47,144 +33,112 @@
 
 def is_src_rpm(name):
     "Check if the link is pointing to a src.rpm file"
-    if name[-8:] == ".src.rpm":
-        return True
-    else:
-        return False
+    return name.endswith(".src.rpm")
 
 def package_name_from_srpm(srpm):
     "Strip out the package name from the src.rpm filename"
-    strings = srpm.split('-')
-    package_name = strings[0]
-    for i in range(1, len (strings) - 1):
-        str = strings[i]
-        if not str[0].isdigit():
-            package_name += '-' + str
-    return package_name
 
-def clean_package_list(package_list):
-    "Removes multiple entries of packages and sorts the list"
-    set = {}
-    map(set.__setitem__, package_list, [])
-    return set.keys()
-
-
-def get_latest_released_meego_source_package_list(d):
-    "Returns list of all the name os packages in the latest meego distro"
-
-    package_names = []
-    try:
-        f = open("/tmp/Meego-1.1", "r")
-        for line in f:
-            package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
-    except IOError: pass
-    package_list=clean_package_list(package_names)
-    return "1.0", package_list
+    # ca-certificates-2016.2.7-1.0.fc24.src.rpm
+    # ^name           ^ver     ^release^removed
+    (name, version, release) = srpm.replace(".src.rpm", "").rsplit("-", 2)
+    return name
 
 def get_source_package_list_from_url(url, section, d):
     "Return a sectioned list of package names from a URL list"
 
     bb.note("Reading %s: %s" % (url, section))
     links = get_links_from_url(url, d)
-    srpms = list(filter(is_src_rpm, links))
-    names_list = list(map(package_name_from_srpm, srpms))
+    srpms = filter(is_src_rpm, links)
+    names_list = map(package_name_from_srpm, srpms)
 
-    new_pkgs = []
+    new_pkgs = set()
     for pkgs in names_list:
-       new_pkgs.append(pkgs + ":" + section)
-
+       new_pkgs.add(pkgs + ":" + section)
     return new_pkgs
 
+def get_source_package_list_from_url_by_letter(url, section, d):
+    import string
+    from urllib.error import HTTPError
+    packages = set()
+    for letter in (string.ascii_lowercase + string.digits):
+        # Not all subfolders may exist, so silently handle 404
+        try:
+            packages |= get_source_package_list_from_url(url + "/" + letter, section, d)
+        except HTTPError as e:
+            if e.code != 404: raise
+    return packages
+
 def get_latest_released_fedora_source_package_list(d):
     "Returns list of all the name os packages in the latest fedora distro"
     latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/", d)
-
-    package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main", d)
-
-#    package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
-    package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
-
-    package_list=clean_package_list(package_names)
-        
-    return latest, package_list
+    package_names = get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Everything/source/tree/Packages/" % latest, "main", d)
+    package_names |= get_source_package_list_from_url_by_letter("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates", d)
+    return latest, package_names
 
 def get_latest_released_opensuse_source_package_list(d):
     "Returns list of all the name os packages in the latest opensuse distro"
     latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/",d)
 
     package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main", d)
-    package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates", d)
-
-    package_list=clean_package_list(package_names)
-    return latest, package_list
+    package_names |= get_source_package_list_from_url("http://download.opensuse.org/update/%s/src/" % latest, "updates", d)
+    return latest, package_names
 
 def get_latest_released_mandriva_source_package_list(d):
     "Returns list of all the name os packages in the latest mandriva distro"
     latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/", d)
     package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main", d)
-#    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
-    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
+    package_names |= get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates", d)
+    return latest, package_names
 
-    package_list=clean_package_list(package_names)
-    return latest, package_list
+def get_latest_released_clear_source_package_list(d):
+    latest = find_latest_numeric_release("https://download.clearlinux.org/releases/", d)
+    package_names = get_source_package_list_from_url("https://download.clearlinux.org/releases/%s/clear/source/SRPMS/" % latest, "main", d)
+    return latest, package_names
 
 def find_latest_debian_release(url, d):
     "Find the latest listed debian release on the given url"
 
-    releases = []
-    for link in get_links_from_url(url, d):
-        if link[:6] == "Debian":
-            if ';' not in link:
-                releases.append(link)
+    releases = [link.replace("Debian", "")
+                for link in get_links_from_url(url, d)
+                if link.startswith("Debian")]
     releases.sort()
     try:
-        return releases.pop()[6:]
+        return releases[-1]
     except:
         return "_NotFound_"
 
 def get_debian_style_source_package_list(url, section, d):
     "Return the list of package-names stored in the debian style Sources.gz file"
-    import tempfile
     import gzip
 
-    webpage = ''
-    sock = create_socket(url,d)
-    if sock:
-        webpage = sock.read()
-
-    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
-    tmpfilename=tmpfile.name
-    tmpfile.write(sock.read())
-    tmpfile.close()
-    bb.note("Reading %s: %s" % (url, section))
-
-    f = gzip.open(tmpfilename)
-    package_names = []
-    for line in f:
-        if line[:9] == "Package: ":
-            package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
-    os.unlink(tmpfilename)
-
+    package_names = set()
+    for line in gzip.open(create_socket(url, d), mode="rt"):
+        if line.startswith("Package:"):
+            pkg = line.split(":", 1)[1].strip()
+            package_names.add(pkg + ":" + section)
     return package_names
 
 def get_latest_released_debian_source_package_list(d):
-    "Returns list of all the name os packages in the latest debian distro"
+    "Returns list of all the name of packages in the latest debian distro"
     latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/", d)
-    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz" 
+    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz"
     package_names = get_debian_style_source_package_list(url, "main", d)
-#    url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz" 
-#    package_names += get_debian_style_source_package_list(url, "contrib")
-    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz" 
-    package_names += get_debian_style_source_package_list(url, "updates", d)
-    package_list=clean_package_list(package_names)
-    return latest, package_list
+    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz"
+    package_names |= get_debian_style_source_package_list(url, "updates", d)
+    return latest, package_names
 
 def find_latest_ubuntu_release(url, d):
-    "Find the latest listed ubuntu release on the given url"
+    """
+    Find the latest listed Ubuntu release on the given ubuntu/dists/ URL.
+
+    To avoid matching development releases, look for distributions that have
+    updates; the resulting distro could be any supported release.
+    """
     url += "?C=M;O=D" # Descending Sort by Last Modified
     for link in get_links_from_url(url, d):
-        if link[-8:] == "-updates":
-            return link[:-8]
+        if "-updates" in link:
+            distro = link.replace("-updates", "")
+            return distro
     return "_NotFound_"
 
 def get_latest_released_ubuntu_source_package_list(d):
@@ -192,52 +146,45 @@
     latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/", d)
     url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
     package_names = get_debian_style_source_package_list(url, "main", d)
-#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
-#    package_names += get_debian_style_source_package_list(url, "multiverse")
-#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
-#    package_names += get_debian_style_source_package_list(url, "universe")
     url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
-    package_names += get_debian_style_source_package_list(url, "updates", d)
-    package_list=clean_package_list(package_names)
-    return latest, package_list
+    package_names |= get_debian_style_source_package_list(url, "updates", d)
+    return latest, package_names
 
 def create_distro_packages_list(distro_check_dir, d):
+    import shutil
+
     pkglst_dir = os.path.join(distro_check_dir, "package_lists")
-    if not os.path.isdir (pkglst_dir):
-        os.makedirs(pkglst_dir)
-    # first clear old stuff
-    for file in os.listdir(pkglst_dir):
-        os.unlink(os.path.join(pkglst_dir, file))
- 
-    per_distro_functions = [
-                            ["Debian", get_latest_released_debian_source_package_list],
-                            ["Ubuntu", get_latest_released_ubuntu_source_package_list],
-                            ["Fedora", get_latest_released_fedora_source_package_list],
-                            ["OpenSuSE", get_latest_released_opensuse_source_package_list],
-                            ["Mandriva", get_latest_released_mandriva_source_package_list],
-                            ["Meego", get_latest_released_meego_source_package_list]
-                           ]
- 
-    from datetime import datetime
-    begin = datetime.now()
-    for distro in per_distro_functions:
-        name = distro[0]
-        release, package_list = distro[1](d)
+    bb.utils.remove(pkglst_dir, True)
+    bb.utils.mkdirhier(pkglst_dir)
+
+    per_distro_functions = (
+                            ("Debian", get_latest_released_debian_source_package_list),
+                            ("Ubuntu", get_latest_released_ubuntu_source_package_list),
+                            ("Fedora", get_latest_released_fedora_source_package_list),
+                            ("OpenSuSE", get_latest_released_opensuse_source_package_list),
+                            ("Mandriva", get_latest_released_mandriva_source_package_list),
+                            ("Clear", get_latest_released_clear_source_package_list),
+                           )
+
+    for name, fetcher_func in per_distro_functions:
+        try:
+            release, package_list = fetcher_func(d)
+        except Exception as e:
+            bb.warn("Cannot fetch packages for %s: %s" % (name, e))
         bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
+        if len(package_list) == 0:
+            bb.error("Didn't fetch any packages for %s %s" % (name, release))
+
         package_list_file = os.path.join(pkglst_dir, name + "-" + release)
-        f = open(package_list_file, "w+b")
-        for pkg in package_list:
-            f.write(pkg + "\n")
-        f.close()
-    end = datetime.now()
-    delta = end - begin
-    bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
+        with open(package_list_file, 'w') as f:
+            for pkg in sorted(package_list):
+                f.write(pkg + "\n")
 
 def update_distro_data(distro_check_dir, datetime, d):
     """
-        If distro packages list data is old then rebuild it.
-        The operations has to be protected by a lock so that
-        only one thread performes it at a time.
+    If distro packages list data is old then rebuild it.
+    The operation has to be protected by a lock so that
+    only one thread performs it at a time.
     """
     if not os.path.isdir (distro_check_dir):
         try:
@@ -264,71 +211,59 @@
             f.seek(0)
             f.write(datetime)
 
-    except OSError:
-        raise Exception('Unable to read/write this file: %s' % (datetime_file))
+    except OSError as e:
+        raise Exception('Unable to open timestamp: %s' % e)
     finally:
         fcntl.lockf(f, fcntl.LOCK_UN)
         f.close()
- 
+
 def compare_in_distro_packages_list(distro_check_dir, d):
     if not os.path.isdir(distro_check_dir):
         raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
-        
+
     localdata = bb.data.createCopy(d)
     pkglst_dir = os.path.join(distro_check_dir, "package_lists")
     matching_distros = []
-    pn = d.getVar('PN', True)
-    recipe_name = d.getVar('PN', True)
+    pn = recipe_name = d.getVar('PN')
     bb.note("Checking: %s" % pn)
 
-    trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
-
     if pn.find("-native") != -1:
         pnstripped = pn.split("-native")
-        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
-        bb.data.update_data(localdata)
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
         recipe_name = pnstripped[0]
 
     if pn.startswith("nativesdk-"):
         pnstripped = pn.split("nativesdk-")
-        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
-        bb.data.update_data(localdata)
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES'))
         recipe_name = pnstripped[1]
 
     if pn.find("-cross") != -1:
         pnstripped = pn.split("-cross")
-        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
-        bb.data.update_data(localdata)
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
         recipe_name = pnstripped[0]
 
     if pn.find("-initial") != -1:
         pnstripped = pn.split("-initial")
-        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
-        bb.data.update_data(localdata)
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES'))
         recipe_name = pnstripped[0]
 
     bb.note("Recipe: %s" % recipe_name)
-    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
 
     distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
-
-    if tmp:
-        list = tmp.split(' ')
-        for str in list:
-            if str and str.find("=") == -1 and distro_exceptions[str]:
-                matching_distros.append(str)
+    tmp = localdata.getVar('DISTRO_PN_ALIAS') or ""
+    for str in tmp.split():
+        if str and str.find("=") == -1 and distro_exceptions[str]:
+            matching_distros.append(str)
 
     distro_pn_aliases = {}
-    if tmp:
-        list = tmp.split(' ')
-        for str in list:
-            if str.find("=") != -1:
-                (dist, pn_alias) = str.split('=')
-                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
- 
+    for str in tmp.split():
+        if "=" in str:
+            (dist, pn_alias) = str.split('=')
+            distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+
     for file in os.listdir(pkglst_dir):
         (distro, distro_release) = file.split("-")
-        f = open(os.path.join(pkglst_dir, file), "rb")
+        f = open(os.path.join(pkglst_dir, file), "r")
         for line in f:
             (pkg, section) = line.split(":")
             if distro.lower() in distro_pn_aliases:
@@ -341,38 +276,34 @@
                 break
         f.close()
 
-    
-    if tmp != None:
-        list = tmp.split(' ')
-        for item in list:
-            matching_distros.append(item)
+    for item in tmp.split():
+        matching_distros.append(item)
     bb.note("Matching: %s" % matching_distros)
     return matching_distros
 
 def create_log_file(d, logname):
-    import subprocess
-    logpath = d.getVar('LOG_DIR', True)
+    logpath = d.getVar('LOG_DIR')
     bb.utils.mkdirhier(logpath)
     logfn, logsuffix = os.path.splitext(logname)
-    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
+    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME'), logsuffix))
     if not os.path.exists(logfile):
             slogfile = os.path.join(logpath, logname)
             if os.path.exists(slogfile):
                     os.remove(slogfile)
-            subprocess.call("touch %s" % logfile, shell=True)
+            open(logfile, 'w+').close()
             os.symlink(logfile, slogfile)
             d.setVar('LOG_FILE', logfile)
     return logfile
 
 
 def save_distro_check_result(result, datetime, result_file, d):
-    pn = d.getVar('PN', True)
-    logdir = d.getVar('LOG_DIR', True)
+    pn = d.getVar('PN')
+    logdir = d.getVar('LOG_DIR')
     if not logdir:
         bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
         return
-    if not os.path.isdir(logdir):
-        os.makedirs(logdir)
+    bb.utils.mkdirhier(logdir)
+
     line = pn
     for i in result:
         line = line + "," + i
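
Two of the distro_check rewrites above are worth a note: the Fedora fetcher
now walks per-letter subdirectories because the Everything/source/tree/Packages/
index is sharded, and package_name_from_srpm() leans on the fixed
name-version-release.src.rpm layout, so two right-splits recover the package
name even when it contains dashes:

    srpm = "ca-certificates-2016.2.7-1.0.fc24.src.rpm"
    name, version, release = srpm.replace(".src.rpm", "").rsplit("-", 2)
    print(name, version, release)  # ca-certificates 2016.2.7 1.0.fc24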
diff --git a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
index ba61f98..7ce767e 100644
--- a/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
+++ b/import-layers/yocto-poky/meta/lib/oe/gpg_sign.py
@@ -7,11 +7,11 @@
 class LocalSigner(object):
     """Class for handling local (on the build host) signing"""
     def __init__(self, d):
-        self.gpg_bin = d.getVar('GPG_BIN', True) or \
+        self.gpg_bin = d.getVar('GPG_BIN') or \
                   bb.utils.which(os.getenv('PATH'), 'gpg')
-        self.gpg_path = d.getVar('GPG_PATH', True)
+        self.gpg_path = d.getVar('GPG_PATH')
         self.gpg_version = self.get_gpg_version()
-        self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpm")
+        self.rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmsign")
 
     def export_pubkey(self, output_file, keyid, armor=True):
         """Export GPG public key to a file"""
@@ -31,9 +31,10 @@
         """Sign RPM files"""
 
         cmd = self.rpm_bin + " --addsign --define '_gpg_name %s'  " % keyid
-        cmd += "--define '_gpg_passphrase %s' " % passphrase
+        gpg_args = '--batch --passphrase=%s' % passphrase
         if self.gpg_version > (2,1,):
-            cmd += "--define '_gpg_sign_cmd_extra_args --pinentry-mode=loopback' "
+            gpg_args += ' --pinentry-mode=loopback'
+        cmd += "--define '_gpg_sign_cmd_extra_args %s' " % gpg_args
         if self.gpg_bin:
             cmd += "--define '%%__gpg %s' " % self.gpg_bin
         if self.gpg_path:
diff --git a/import-layers/yocto-poky/meta/lib/oe/lsb.py b/import-layers/yocto-poky/meta/lib/oe/lsb.py
index e0bdfba..3a945e0 100644
--- a/import-layers/yocto-poky/meta/lib/oe/lsb.py
+++ b/import-layers/yocto-poky/meta/lib/oe/lsb.py
@@ -1,26 +1,54 @@
-def release_dict():
-    """Return the output of lsb_release -ir as a dictionary"""
+def release_dict_osr():
+    """ Populate a dict with pertinent values from /etc/os-release """
+    if not os.path.exists('/etc/os-release'):
+        return None
+
+    data = {}
+    with open('/etc/os-release') as f:
+        for line in f:
+            try:
+                key, val = line.rstrip().split('=', 1)
+            except ValueError:
+                continue
+            if key == 'ID':
+                data['DISTRIB_ID'] = val.strip('"')
+            if key == 'VERSION_ID':
+                data['DISTRIB_RELEASE'] = val.strip('"')
+
+    return data
+
+def release_dict_lsb():
+    """ Return the output of lsb_release -ir as a dictionary """
     from subprocess import PIPE
 
     try:
         output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
     except bb.process.CmdError as exc:
-        return None
+        return {}
+
+    lsb_map = { 'Distributor ID': 'DISTRIB_ID',
+                'Release': 'DISTRIB_RELEASE'}
+    lsb_keys = lsb_map.keys()
 
     data = {}
     for line in output.splitlines():
-        if line.startswith("-e"): line = line[3:]
+        if line.startswith("-e"):
+            line = line[3:]
         try:
             key, value = line.split(":\t", 1)
         except ValueError:
             continue
-        else:
-            data[key] = value
+        if key in lsb_keys:
+            data[lsb_map[key]] = value
+
+    if len(data.keys()) != 2:
+        return None
+
     return data
 
 def release_dict_file():
-    """ Try to gather LSB release information manually when lsb_release tool is unavailable """
-    data = None
+    """ Try to gather release information manually when other methods fail """
+    data = {}
     try:
         if os.path.exists('/etc/lsb-release'):
             data = {}
@@ -37,14 +65,6 @@
             if match:
                 data['DISTRIB_ID'] = match.group(1)
                 data['DISTRIB_RELEASE'] = match.group(2)
-        elif os.path.exists('/etc/os-release'):
-            data = {}
-            with open('/etc/os-release') as f:
-                for line in f:
-                    if line.startswith('NAME='):
-                        data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
-                    if line.startswith('VERSION_ID='):
-                        data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
         elif os.path.exists('/etc/SuSE-release'):
             data = {}
             data['DISTRIB_ID'] = 'SUSE LINUX'
@@ -55,7 +75,7 @@
                         break
 
     except IOError:
-        return None
+        return {}
     return data
 
 def distro_identifier(adjust_hook=None):
@@ -64,15 +84,17 @@
 
     import re
 
-    lsb_data = release_dict()
-    if lsb_data:
-        distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
-    else:
-        lsb_data_file = release_dict_file()
-        if lsb_data_file:
-            distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
-        else:
-            distro_id, release = None, None
+    # Try /etc/os-release first, then the output of `lsb_release -ir` and
+    # finally fall back on parsing various release files in order to determine
+    # host distro name and version.
+    distro_data = release_dict_osr()
+    if not distro_data:
+        distro_data = release_dict_lsb()
+    if not distro_data:
+        distro_data = release_dict_file()
+
+    distro_id = distro_data.get('DISTRIB_ID', '')
+    release = distro_data.get('DISTRIB_RELEASE', '')
 
     if adjust_hook:
         distro_id, release = adjust_hook(distro_id, release)
@@ -82,7 +104,7 @@
     distro_id = re.sub(r'\W', '', distro_id)
 
     if release:
-        id_str = '{0}-{1}'.format(distro_id, release)
+        id_str = '{0}-{1}'.format(distro_id.lower(), release)
     else:
         id_str = distro_id
     return id_str.replace(' ','-').replace('/','-')
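
The lsb.py rework inverts the probe order: /etc/os-release first, then
lsb_release -ir, then the assorted release files, with each helper returning
a uniform DISTRIB_ID/DISTRIB_RELEASE dict. A standalone sketch of the
os-release parse and the identifier it ultimately feeds (file contents
hypothetical):

    lines = ['NAME="Ubuntu"', 'ID=ubuntu', 'VERSION_ID="16.04"']
    data = {}
    for line in lines:
        key, _, val = line.partition('=')
        if key == 'ID':
            data['DISTRIB_ID'] = val.strip('"')
        elif key == 'VERSION_ID':
            data['DISTRIB_RELEASE'] = val.strip('"')
    print('%s-%s' % (data['DISTRIB_ID'].lower(), data['DISTRIB_RELEASE']))
    # ubuntu-16.04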
diff --git a/import-layers/yocto-poky/meta/lib/oe/manifest.py b/import-layers/yocto-poky/meta/lib/oe/manifest.py
index 95f8eb2..60c49be 100644
--- a/import-layers/yocto-poky/meta/lib/oe/manifest.py
+++ b/import-layers/yocto-poky/meta/lib/oe/manifest.py
@@ -59,9 +59,9 @@
 
         if manifest_dir is None:
             if manifest_type != self.MANIFEST_TYPE_IMAGE:
-                self.manifest_dir = self.d.getVar('SDK_DIR', True)
+                self.manifest_dir = self.d.getVar('SDK_DIR')
             else:
-                self.manifest_dir = self.d.getVar('WORKDIR', True)
+                self.manifest_dir = self.d.getVar('WORKDIR')
         else:
             self.manifest_dir = manifest_dir
 
@@ -82,7 +82,7 @@
     This will be used for testing until the class is implemented properly!
     """
     def _create_dummy_initial(self):
-        image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+        image_rootfs = self.d.getVar('IMAGE_ROOTFS')
         pkg_list = dict()
         if image_rootfs.find("core-image-sato-sdk") > 0:
             pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
@@ -104,7 +104,7 @@
             pkg_list['lgp'] = \
                 "locale-base-en-us locale-base-en-gb"
         elif image_rootfs.find("core-image-minimal") > 0:
-            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
+            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "packagegroup-core-boot"
 
         with open(self.initial_manifest, "w+") as manifest:
             manifest.write(self.initial_manifest_file_header)
@@ -195,7 +195,7 @@
         for pkg in pkg_list.split():
             pkg_type = self.PKG_TYPE_MUST_INSTALL
 
-            ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+            ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
 
             for ml_variant in ml_variants:
                 if pkg.startswith(ml_variant + '-'):
@@ -216,13 +216,13 @@
 
             for var in self.var_maps[self.manifest_type]:
                 if var in self.vars_to_split:
-                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
+                    split_pkgs = self._split_multilib(self.d.getVar(var))
                     if split_pkgs is not None:
                         pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
                 else:
-                    pkg_list = self.d.getVar(var, True)
+                    pkg_list = self.d.getVar(var)
                     if pkg_list is not None:
-                        pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+                        pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
 
             for pkg_type in pkgs:
                 for pkg in pkgs[pkg_type].split():
@@ -245,7 +245,7 @@
         for pkg in pkg_list.split():
             pkg_type = self.PKG_TYPE_MUST_INSTALL
 
-            ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+            ml_variants = self.d.getVar('MULTILIB_VARIANTS').split()
 
             for ml_variant in ml_variants:
                 if pkg.startswith(ml_variant + '-'):
@@ -266,13 +266,13 @@
 
             for var in self.var_maps[self.manifest_type]:
                 if var in self.vars_to_split:
-                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
+                    split_pkgs = self._split_multilib(self.d.getVar(var))
                     if split_pkgs is not None:
                         pkgs = dict(list(pkgs.items()) + list(split_pkgs.items()))
                 else:
-                    pkg_list = self.d.getVar(var, True)
+                    pkg_list = self.d.getVar(var)
                     if pkg_list is not None:
-                        pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var, True)
+                        pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var)
 
             for pkg_type in pkgs:
                 for pkg in pkgs[pkg_type].split():
@@ -310,7 +310,7 @@
             manifest.write(self.initial_manifest_file_header)
 
             for var in self.var_maps[self.manifest_type]:
-                pkg_list = self.d.getVar(var, True)
+                pkg_list = self.d.getVar(var)
 
                 if pkg_list is None:
                     continue
@@ -332,7 +332,7 @@
                     'ipk': OpkgManifest,
                     'deb': DpkgManifest}
 
-    manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
+    manifest = manifest_map[d.getVar('IMAGE_PKGTYPE')](d, manifest_dir, manifest_type)
 
     if final_manifest:
         manifest.create_final()
diff --git a/import-layers/yocto-poky/meta/lib/oe/package.py b/import-layers/yocto-poky/meta/lib/oe/package.py
index 02642f2..4797e7d 100644
--- a/import-layers/yocto-poky/meta/lib/oe/package.py
+++ b/import-layers/yocto-poky/meta/lib/oe/package.py
@@ -18,23 +18,24 @@
         newmode = origmode | stat.S_IWRITE | stat.S_IREAD
         os.chmod(file, newmode)
 
-    extraflags = ""
+    stripcmd = [strip]
 
     # kernel module    
     if elftype & 16:
-        extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
+        stripcmd.extend(["--strip-debug", "--remove-section=.comment",
+            "--remove-section=.note", "--preserve-dates"])
     # .so and shared library
     elif ".so" in file and elftype & 8:
-        extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
+        stripcmd.extend(["--remove-section=.comment", "--remove-section=.note", "--strip-unneeded"])
     # shared or executable:
     elif elftype & 8 or elftype & 4:
-        extraflags = "--remove-section=.comment --remove-section=.note"
+        stripcmd.extend(["--remove-section=.comment", "--remove-section=.note"])
 
-    stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
+    stripcmd.append(file)
     bb.debug(1, "runstrip: %s" % stripcmd)
 
     try:
-        output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT, shell=True)
+        output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
         bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
 
@@ -60,32 +61,59 @@
     provides = {}
     requires = {}
 
-    r = re.compile(r'[<>=]+ +[^ ]*')
+    file_re = re.compile(r'\s+\d+\s(.*)')
+    dep_re = re.compile(r'\s+(\S)\s+(.*)')
+    r = re.compile(r'[<>=]+\s+\S*')
 
     def process_deps(pipe, pkg, pkgdest, provides, requires):
+        file = None
         for line in pipe:
-            f = line.decode("utf-8").split(" ", 1)[0].strip()
-            line = line.decode("utf-8").split(" ", 1)[1].strip()
+            line = line.decode("utf-8")
 
-            if line.startswith("Requires:"):
+            m = file_re.match(line)
+            if m:
+                file = m.group(1)
+                file = file.replace(pkgdest + "/" + pkg, "")
+                file = file_translate(file)
+                continue
+
+            m = dep_re.match(line)
+            if not m or not file:
+                continue
+
+            type, dep = m.groups()
+
+            if type == 'R':
                 i = requires
-            elif line.startswith("Provides:"):
+            elif type == 'P':
                 i = provides
             else:
+                continue
+
+            if dep.startswith("python("):
                 continue
 
-            file = f.replace(pkgdest + "/" + pkg, "")
-            file = file_translate(file)
-            value = line.split(":", 1)[1].strip()
-            value = r.sub(r'(\g<0>)', value)
+            # Ignore all perl(VMS::...) and perl(Mac::...) dependencies. These
+            # are typically used conditionally from the Perl code, but are
+            # generated as unconditional dependencies.
+            if dep.startswith('perl(VMS::') or dep.startswith('perl(Mac::'):
+                continue
 
-            if value.startswith("rpmlib("):
+            # Ignore perl dependencies on .pl files.
+            if dep.startswith('perl(') and dep.endswith('.pl)'):
                 continue
-            if value == "python":
-                continue
+
+            # Remove perl versions and perl module versions since they typically
+            # do not make sense when used as package versions.
+            if dep.startswith('perl') and r.search(dep):
+                dep = dep.split()[0]
+
+            # Put parentheses around any version specifications.
+            dep = r.sub(r'(\g<0>)', dep)
+
             if file not in i:
                 i[file] = []
-            i[file].append(value)
+            i[file].append(dep)
 
         return provides, requires
 
@@ -103,7 +131,7 @@
     import re
 
     shlib_provider = {}
-    shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
+    shlibs_dirs = d.getVar('SHLIBSDIRS').split()
     list_re = re.compile('^(.*)\.list$')
     # Go from least to most specific since the last one found wins
     for dir in reversed(shlibs_dirs):
@@ -149,6 +177,7 @@
                         continue
                     pkgitems.append(pathitem)
                 pkgname = '-'.join(pkgitems).replace('_', '-')
+                pkgname = pkgname.replace('@', '')
                 pkgfile = os.path.join(root, dn, 'package.json')
                 data = None
                 if os.path.exists(pkgfile):
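
runstrip() above now assembles the strip invocation as an argument list and
runs it without shell=True, so file names containing spaces or shell
metacharacters pass through as single argv entries instead of being re-split
(or interpreted) by a shell. The difference in miniature, with a hypothetical
path:

    import subprocess

    fn = '/path/with spaces/libfoo.so'  # would need careful quoting in a shell string
    # each list element becomes exactly one argv entry, no quoting required
    print(subprocess.check_output(['echo', fn]).decode().strip())
    # /path/with spaces/libfoo.so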
diff --git a/import-layers/yocto-poky/meta/lib/oe/package_manager.py b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
index 13577b1..3a2daad 100644
--- a/import-layers/yocto-poky/meta/lib/oe/package_manager.py
+++ b/import-layers/yocto-poky/meta/lib/oe/package_manager.py
@@ -102,108 +102,14 @@
 
 
 class RpmIndexer(Indexer):
-    def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
-        package_archs = collections.OrderedDict()
-        target_os = collections.OrderedDict()
-
-        if arch_var is not None and os_var is not None:
-            package_archs['default'] = self.d.getVar(arch_var, True).split()
-            package_archs['default'].reverse()
-            target_os['default'] = self.d.getVar(os_var, True).strip()
-        else:
-            package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
-            # arch order is reversed.  This ensures the -best- match is
-            # listed first!
-            package_archs['default'].reverse()
-            target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
-            multilibs = self.d.getVar('MULTILIBS', True) or ""
-            for ext in multilibs.split():
-                eext = ext.split(':')
-                if len(eext) > 1 and eext[0] == 'multilib':
-                    localdata = bb.data.createCopy(self.d)
-                    default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
-                    default_tune = localdata.getVar(default_tune_key, False)
-                    if default_tune is None:
-                        default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
-                        default_tune = localdata.getVar(default_tune_key, False)
-                    if default_tune:
-                        localdata.setVar("DEFAULTTUNE", default_tune)
-                        bb.data.update_data(localdata)
-                        package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
-                                                                  True).split()
-                        package_archs[eext[1]].reverse()
-                        target_os[eext[1]] = localdata.getVar("TARGET_OS",
-                                                              True).strip()
-
-        ml_prefix_list = collections.OrderedDict()
-        for mlib in package_archs:
-            if mlib == 'default':
-                ml_prefix_list[mlib] = package_archs[mlib]
-            else:
-                ml_prefix_list[mlib] = list()
-                for arch in package_archs[mlib]:
-                    if arch in ['all', 'noarch', 'any']:
-                        ml_prefix_list[mlib].append(arch)
-                    else:
-                        ml_prefix_list[mlib].append(mlib + "_" + arch)
-
-        return (ml_prefix_list, target_os)
-
     def write_index(self):
-        sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
-        all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+            raise NotImplementedError('Package feed signing not yet implemented for rpm')
 
-        mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
-
-        archs = set()
-        for item in mlb_prefix_list:
-            archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
-
-        if len(archs) == 0:
-            archs = archs.union(set(all_mlb_pkg_archs))
-
-        archs = archs.union(set(sdk_pkg_archs))
-
-        rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
-        if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
-            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
-        else:
-            signer = None
-        index_cmds = []
-        repomd_files = []
-        rpm_dirs_found = False
-        for arch in archs:
-            dbpath = os.path.join(self.d.getVar('WORKDIR', True), 'rpmdb', arch)
-            if os.path.exists(dbpath):
-                bb.utils.remove(dbpath, True)
-            arch_dir = os.path.join(self.deploy_dir, arch)
-            if not os.path.isdir(arch_dir):
-                continue
-
-            index_cmds.append("%s --dbpath %s --update -q %s" % \
-                             (rpm_createrepo, dbpath, arch_dir))
-            repomd_files.append(os.path.join(arch_dir, 'repodata', 'repomd.xml'))
-
-            rpm_dirs_found = True
-
-        if not rpm_dirs_found:
-            bb.note("There are no packages in %s" % self.deploy_dir)
-            return
-
-        # Create repodata
-        result = oe.utils.multiprocess_exec(index_cmds, create_index)
+        createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c")
+        result = create_index("%s --update -q %s" % (createrepo_c, self.deploy_dir))
         if result:
-            bb.fatal('%s' % ('\n'.join(result)))
-        # Sign repomd
-        if signer:
-            for repomd in repomd_files:
-                feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
-                is_ascii_sig = (feed_sig_type.upper() != "BIN")
-                signer.detach_sign(repomd,
-                                   self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
-                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
-                                   armor=is_ascii_sig)
-
+            bb.fatal(result)
 
 class OpkgIndexer(Indexer):
     def write_index(self):
@@ -212,8 +118,8 @@
                      "MULTILIB_ARCHS"]
 
         opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
-        if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
-            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND', True))
+        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
+            signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND'))
         else:
             signer = None
 
@@ -223,7 +129,7 @@
         index_cmds = set()
         index_sign_files = set()
         for arch_var in arch_vars:
-            archs = self.d.getVar(arch_var, True)
+            archs = self.d.getVar(arch_var)
             if archs is None:
                 continue
 
@@ -251,12 +157,12 @@
             bb.fatal('%s' % ('\n'.join(result)))
 
         if signer:
-            feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE', True)
+            feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE')
             is_ascii_sig = (feed_sig_type.upper() != "BIN")
             for f in index_sign_files:
                 signer.detach_sign(f,
-                                   self.d.getVar('PACKAGE_FEED_GPG_NAME', True),
-                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True),
+                                   self.d.getVar('PACKAGE_FEED_GPG_NAME'),
+                                   self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'),
                                    armor=is_ascii_sig)
 
 
@@ -290,16 +196,16 @@
 
         os.environ['APT_CONFIG'] = self.apt_conf_file
 
-        pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
+        pkg_archs = self.d.getVar('PACKAGE_ARCHS')
         if pkg_archs is not None:
             arch_list = pkg_archs.split()
-        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
+        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS')
         if sdk_pkg_archs is not None:
             for a in sdk_pkg_archs.split():
                 if a not in pkg_archs:
                     arch_list.append(a)
 
-        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
         arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
 
         apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
@@ -332,7 +238,7 @@
         result = oe.utils.multiprocess_exec(index_cmds, create_index)
         if result:
             bb.fatal('%s' % ('\n'.join(result)))
-        if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+        if self.d.getVar('PACKAGE_FEED_SIGN') == '1':
             raise NotImplementedError('Package feed signing not implemented for dpkg')
 
 
@@ -346,119 +252,9 @@
     def list_pkgs(self):
         pass
 
-
 class RpmPkgsList(PkgsList):
-    def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
-        super(RpmPkgsList, self).__init__(d, rootfs_dir)
-
-        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
-        self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
-
-        self.ml_prefix_list, self.ml_os_list = \
-            RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
-
-        # Determine rpm version
-        cmd = "%s --version" % self.rpm_cmd
-        try:
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Getting rpm version failed. Command '%s' "
-                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-    '''
-    Translate the RPM/Smart format names to the OE multilib format names
-    '''
-    def _pkg_translate_smart_to_oe(self, pkg, arch):
-        new_pkg = pkg
-        new_arch = arch
-        fixed_arch = arch.replace('_', '-')
-        found = 0
-        for mlib in self.ml_prefix_list:
-            for cmp_arch in self.ml_prefix_list[mlib]:
-                fixed_cmp_arch = cmp_arch.replace('_', '-')
-                if fixed_arch == fixed_cmp_arch:
-                    if mlib == 'default':
-                        new_pkg = pkg
-                        new_arch = cmp_arch
-                    else:
-                        new_pkg = mlib + '-' + pkg
-                        # We need to strip off the ${mlib}_ prefix on the arch
-                        new_arch = cmp_arch.replace(mlib + '_', '')
-
-                    # Workaround for bug 3565. Simply look to see if we
-                    # know of a package with that name, if not try again!
-                    filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
-                                            'runtime-reverse',
-                                            new_pkg)
-                    if os.path.exists(filename):
-                        found = 1
-                        break
-
-            if found == 1 and fixed_arch == fixed_cmp_arch:
-                break
-        #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
-        return new_pkg, new_arch
-
-    def _list_pkg_deps(self):
-        cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
-               "-t", self.image_rpmlib]
-
-        try:
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8")
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Cannot get the package dependencies. Command '%s' "
-                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-
-        return output
-
     def list_pkgs(self):
-        cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
-        cmd += ' -D "_dbpath /var/lib/rpm" -qa'
-        cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
-
-        try:
-            # bb.note(cmd)
-            tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip().decode("utf-8")
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Cannot get the installed packages list. Command '%s' "
-                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-        output = dict()
-        deps = dict()
-        dependencies = self._list_pkg_deps()
-
-        # Populate deps dictionary for better manipulation
-        for line in dependencies.splitlines():
-            try:
-                pkg, dep = line.split("|")
-                if not pkg in deps:
-                    deps[pkg] = list()
-                if not dep in deps[pkg]:
-                    deps[pkg].append(dep)
-            except:
-                # Ignore any other lines they're debug or errors
-                pass
-
-        for line in tmp_output.split('\n'):
-            if len(line.strip()) == 0:
-                continue
-            pkg = line.split()[0]
-            arch = line.split()[1]
-            ver = line.split()[2]
-            dep = deps.get(pkg, [])
-
-            # Skip GPG keys
-            if pkg == 'gpg-pubkey':
-                continue
-
-            pkgorigin = line.split()[3]
-            new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
-
-            output[new_pkg] = {"arch":new_arch, "ver":ver,
-                        "filename":pkgorigin, "deps":dep}
-
-        return output
-
+        return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
 
 class OpkgPkgsList(PkgsList):
     def __init__(self, d, rootfs_dir, config_file):
@@ -466,7 +262,7 @@
 
         self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
         self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
-        self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+        self.opkg_args += self.d.getVar("OPKG_ARGS")
 
     def list_pkgs(self, format=None):
         cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args)
@@ -514,9 +310,6 @@
         self.d = d
         self.deploy_dir = None
         self.deploy_lock = None
-        self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
-        self.feed_base_paths = self.d.getVar('PACKAGE_FEED_BASE_PATHS', True) or ""
-        self.feed_archs = self.d.getVar('PACKAGE_FEED_ARCHS', True)
 
     """
     Update the package manager package database.
@@ -556,8 +349,24 @@
     def list_installed(self):
         pass
 
+    """
+    Returns the path to a tmpdir where resides the contents of a package.
+
+    Deleting the tmpdir is responsability of the caller.
+
+    """
     @abstractmethod
-    def insert_feeds_uris(self):
+    def extract(self, pkg):
+        pass
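+    # Hypothetical usage sketch (package name and cleanup are illustrative):
+    #   tmpdir = pm.extract("busybox")
+    #   ... inspect the unpacked contents under tmpdir ...
+    #   bb.utils.remove(tmpdir, recurse=True)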
+
+    """
+    Add remote package feeds to the repository manager's configuration. The
+    parameters for the feeds are set by feed_uris, feed_base_paths and feed_archs.
+    See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
+    for their description.
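+
+    Illustrative example (values are assumptions, not defaults): with
+    feed_uris = "https://example.com/repo", feed_base_paths = "rpm" and
+    feed_archs = "all core2_64", the rpm backend would register repos for
+    https://example.com/repo/rpm/all and https://example.com/repo/rpm/core2_64.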
+    """
+    @abstractmethod
+    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
         pass
 
     """
@@ -568,20 +377,11 @@
     installation
     """
     def install_complementary(self, globs=None):
-        # we need to write the list of installed packages to a file because the
-        # oe-pkgdata-util reads it from a file
-        installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
-                                           "installed_pkgs.txt")
-        with open(installed_pkgs_file, "w+") as installed_pkgs:
-            pkgs = self.list_installed()
-            output = oe.utils.format_pkg_list(pkgs, "arch")
-            installed_pkgs.write(output)
-
         if globs is None:
-            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True)
+            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
             split_linguas = set()
 
-            for translation in self.d.getVar('IMAGE_LINGUAS', True).split():
+            for translation in self.d.getVar('IMAGE_LINGUAS').split():
                 split_linguas.add(translation)
                 split_linguas.add(translation.split('-')[0])
 
@@ -593,22 +393,29 @@
         if globs is None:
             return
 
-        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
-               "-p", self.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
-               globs]
-        exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
-        if exclude:
-            cmd.extend(['--exclude=' + '|'.join(exclude.split())])
-        try:
-            bb.note("Installing complementary packages ...")
-            bb.note('Running %s' % cmd)
-            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Could not compute complementary packages list. Command "
-                     "'%s' returned %d:\n%s" %
-                     (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
-        self.install(complementary_pkgs.split(), attempt_only=True)
-        os.remove(installed_pkgs_file)
+        # we need to write the list of installed packages to a file because the
+        # oe-pkgdata-util reads it from a file
+        with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
+            pkgs = self.list_installed()
+            output = oe.utils.format_pkg_list(pkgs, "arch")
+            installed_pkgs.write(output)
+            installed_pkgs.flush()
+
+            cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
+                   "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
+                   globs]
+            exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
+            if exclude:
+                cmd.extend(['--exclude=' + '|'.join(exclude.split())])
+            try:
+                bb.note("Installing complementary packages ...")
+                bb.note('Running %s' % cmd)
+                complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+            except subprocess.CalledProcessError as e:
+                bb.fatal("Could not compute complementary packages list. Command "
+                         "'%s' returned %d:\n%s" %
+                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+            self.install(complementary_pkgs.split(), attempt_only=True)
 
     def deploy_dir_lock(self):
         if self.deploy_dir is None:
@@ -654,829 +461,299 @@
                  task_name='target',
                  providename=None,
                  arch_var=None,
-                 os_var=None):
+                 os_var=None,
+                 rpm_repo_workdir="oe-rootfs-repo"):
         super(RpmPM, self).__init__(d)
         self.target_rootfs = target_rootfs
         self.target_vendor = target_vendor
         self.task_name = task_name
-        self.providename = providename
-        self.fullpkglist = list()
-        self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
-        self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
-        self.install_dir_name = "oe_install"
-        self.install_dir_path = os.path.join(self.target_rootfs, self.install_dir_name)
-        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
-        self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
-        # 0 = default, only warnings
-        # 1 = --log-level=info (includes information about executing scriptlets and their output)
-        # 2 = --log-level=debug
-        # 3 = --log-level=debug plus dumps of scriplet content and command invocation
-        self.debug_level = int(d.getVar('ROOTFS_RPM_DEBUG', True) or "0")
-        self.smart_opt = "--log-level=%s --data-dir=%s" % \
-                         ("warning" if self.debug_level == 0 else
-                          "info" if self.debug_level == 1 else
-                          "debug",
-                          os.path.join(target_rootfs, 'var/lib/smart'))
-        self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
+        if arch_var is None:
+            self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-", "_")
+        else:
+            self.archs = self.d.getVar(arch_var).replace("-", "_")
+        if task_name == "host":
+            self.primary_arch = self.d.getVar('SDK_ARCH')
+        else:
+            self.primary_arch = self.d.getVar('MACHINE_ARCH')
+
+        self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+        bb.utils.mkdirhier(self.rpm_repo_dir)
+        oe.path.symlink(self.d.getVar('DEPLOY_DIR_RPM'), oe.path.join(self.rpm_repo_dir, "rpm"), True)
+
+        self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
+        if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
+            bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data'))
+        self.packaging_data_dirs = ['var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf']
         self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
                                                self.task_name)
-        self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
-        self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
-
         if not os.path.exists(self.d.expand('${T}/saved')):
             bb.utils.mkdirhier(self.d.expand('${T}/saved'))
 
-        packageindex_dir = os.path.join(self.d.getVar('WORKDIR', True), 'rpms')
-        self.indexer = RpmIndexer(self.d, packageindex_dir)
-        self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
+    def _configure_dnf(self):
+        # libsolv handles 'noarch' internally; we don't need to specify it explicitly
+        archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]]
+        # This prevents accidental matching against libsolv's built-in policies
+        if len(archs) <= 1:
+            archs = archs + ["bogusarch"]
+        confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/")
+        bb.utils.mkdirhier(confdir)
+        open(confdir + "arch", 'w').write(":".join(archs))
+        distro_codename = self.d.getVar('DISTRO_CODENAME')
+        open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '')
 
-        self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
+        open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("")
 
-    def insert_feeds_uris(self):
-        if self.feed_uris == "":
-            return
 
-        arch_list = []
-        if self.feed_archs is not None:
-            # User define feed architectures
-            arch_list = self.feed_archs.split()
+    def _configure_rpm(self):
+        # We need to configure rpm to use our primary package architecture as the installation architecture,
+        # and to make it compatible with other package architectures that we use.
+        # Otherwise it will refuse to proceed with package installation.
+        platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/")
+        rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/")
+        bb.utils.mkdirhier(platformconfdir)
+        open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch)
+        open(rpmrcconfdir + "rpmrc", 'w').write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch))
+
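+        # rpm "colors" mark file classes: 1 = 32-bit ELF, 2 = 64-bit ELF,
+        # 4 = MIPS N32; a transaction color of 7 accepts all of them.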
+        open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n")
+        if self.d.getVar('RPM_PREFER_ELF_ARCH'):
+            open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH')))
         else:
-            # List must be prefered to least preferred order
-            default_platform_extra = list()
-            platform_extra = list()
-            bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
-            for mlib in self.ml_os_list:
-                for arch in self.ml_prefix_list[mlib]:
-                    plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
-                    if mlib == bbextendvariant:
-                        if plt not in default_platform_extra:
-                            default_platform_extra.append(plt)
-                    else:
-                        if plt not in platform_extra:
-                            platform_extra.append(plt)
-            platform_extra = default_platform_extra + platform_extra
+            open(platformconfdir + "macros", 'a').write("%_prefer_color 7")
 
-            for canonical_arch in platform_extra:
-                arch = canonical_arch.split('-')[0]
-                if not os.path.exists(os.path.join(self.deploy_dir, arch)):
-                    continue
-                arch_list.append(arch)
+        if self.d.getVar('RPM_SIGN_PACKAGES') == '1':
+            signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND'))
+            pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key')
+            signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME'))
+            rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys")
+            cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path]
+            try:
+                subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError as e:
+                bb.fatal("Importing GPG key failed. Command '%s' "
+                        "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
 
-        feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
-
-        uri_iterator = 0
-        channel_priority = 10 + 5 * len(feed_uris) * (len(arch_list) if arch_list else 1)
-
-        for uri in feed_uris:
-            if arch_list:
-                for arch in arch_list:
-                    bb.note('Adding Smart channel url%d%s (%s)' %
-                            (uri_iterator, arch, channel_priority))
-                    self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/%s -y'
-                                       % (uri_iterator, arch, uri, arch))
-                    self._invoke_smart('channel --set url%d-%s priority=%d' %
-                                       (uri_iterator, arch, channel_priority))
-                    channel_priority -= 5
-            else:
-                bb.note('Adding Smart channel url%d (%s)' %
-                        (uri_iterator, channel_priority))
-                self._invoke_smart('channel --add url%d type=rpm-md baseurl=%s -y'
-                                   % (uri_iterator, uri))
-                self._invoke_smart('channel --set url%d priority=%d' %
-                                   (uri_iterator, channel_priority))
-                channel_priority -= 5
-
-            uri_iterator += 1
-
-    '''
-    Create configs for rpm and smart, and multilib is supported
-    '''
     def create_configs(self):
-        target_arch = self.d.getVar('TARGET_ARCH', True)
-        platform = '%s%s-%s' % (target_arch.replace('-', '_'),
-                                self.target_vendor,
-                                self.ml_os_list['default'])
-
-        # List must be prefered to least preferred order
-        default_platform_extra = list()
-        platform_extra = list()
-        bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
-        for mlib in self.ml_os_list:
-            for arch in self.ml_prefix_list[mlib]:
-                plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
-                if mlib == bbextendvariant:
-                    if plt not in default_platform_extra:
-                        default_platform_extra.append(plt)
-                else:
-                    if plt not in platform_extra:
-                        platform_extra.append(plt)
-        platform_extra = default_platform_extra + platform_extra
-
-        self._create_configs(platform, platform_extra)
-
-    def _invoke_smart(self, args):
-        cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
-        # bb.note(cmd)
-        try:
-            complementary_pkgs = subprocess.check_output(cmd,
-                                                         stderr=subprocess.STDOUT,
-                                                         shell=True).decode("utf-8")
-            # bb.note(complementary_pkgs)
-            return complementary_pkgs
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Could not invoke smart. Command "
-                     "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-    def _search_pkg_name_in_feeds(self, pkg, feed_archs):
-        for arch in feed_archs:
-            arch = arch.replace('-', '_')
-            regex_match = re.compile(r"^%s-[^-]*-[^-]*@%s$" % \
-                (re.escape(pkg), re.escape(arch)))
-            for p in self.fullpkglist:
-                if regex_match.match(p) is not None:
-                    # First found is best match
-                    # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
-                    return pkg + '@' + arch
-
-        # Search provides if not found by pkgname.
-        bb.note('Not found %s by name, searching provides ...' % pkg)
-        cmd = "%s %s query --provides %s --show-format='$name-$version'" % \
-                (self.smart_cmd, self.smart_opt, pkg)
-        cmd += " | sed -ne 's/ *Provides://p'"
-        bb.note('cmd: %s' % cmd)
-        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
-        # Found a provider
-        if output:
-            bb.note('Found providers for %s: %s' % (pkg, output))
-            for p in output.split():
-                for arch in feed_archs:
-                    arch = arch.replace('-', '_')
-                    if p.rstrip().endswith('@' + arch):
-                        return p
-
-        return ""
-
-    '''
-    Translate the OE multilib format names to the RPM/Smart format names
-    It searched the RPM/Smart format names in probable multilib feeds first,
-    and then searched the default base feed.
-    '''
-    def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
-        new_pkgs = list()
-
-        for pkg in pkgs:
-            new_pkg = pkg
-            # Search new_pkg in probable multilibs first
-            for mlib in self.ml_prefix_list:
-                # Jump the default archs
-                if mlib == 'default':
-                    continue
-
-                subst = pkg.replace(mlib + '-', '')
-                # if the pkg in this multilib feed
-                if subst != pkg:
-                    feed_archs = self.ml_prefix_list[mlib]
-                    new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
-                    if not new_pkg:
-                        # Failed to translate, package not found!
-                        err_msg = '%s not found in the %s feeds (%s) in %s.' % \
-                                  (pkg, mlib, " ".join(feed_archs), self.d.getVar('DEPLOY_DIR_RPM', True))
-                        if not attempt_only:
-                            bb.error(err_msg)
-                            bb.fatal("This is often caused by an empty package declared " \
-                                     "in a recipe's PACKAGES variable. (Empty packages are " \
-                                     "not constructed unless ALLOW_EMPTY_<pkg> = '1' is used.)")
-                        bb.warn(err_msg)
-                    else:
-                        new_pkgs.append(new_pkg)
-
-                    break
-
-            # Apparently not a multilib package...
-            if pkg == new_pkg:
-                # Search new_pkg in default archs
-                default_archs = self.ml_prefix_list['default']
-                new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
-                if not new_pkg:
-                    err_msg = '%s not found in the feeds (%s) in %s.' % \
-                                  (pkg, " ".join(default_archs), self.d.getVar('DEPLOY_DIR_RPM', True))
-                    if not attempt_only:
-                        bb.error(err_msg)
-                        bb.fatal("This is often caused by an empty package declared " \
-                                 "in a recipe's PACKAGES variable. (Empty packages are " \
-                                 "not constructed unless ALLOW_EMPTY_<pkg> = '1' is used.)")
-                    bb.warn(err_msg)
-                else:
-                    new_pkgs.append(new_pkg)
-
-        return new_pkgs
-
-    def _create_configs(self, platform, platform_extra):
-        # Setup base system configuration
-        bb.note("configuring RPM platform settings")
-
-        # Configure internal RPM environment when using Smart
-        os.environ['RPM_ETCRPM'] = self.etcrpm_dir
-        bb.utils.mkdirhier(self.etcrpm_dir)
-
-        # Setup temporary directory -- install...
-        if os.path.exists(self.install_dir_path):
-            bb.utils.remove(self.install_dir_path, True)
-        bb.utils.mkdirhier(os.path.join(self.install_dir_path, 'tmp'))
-
-        channel_priority = 5
-        platform_dir = os.path.join(self.etcrpm_dir, "platform")
-        sdkos = self.d.getVar("SDK_OS", True)
-        with open(platform_dir, "w+") as platform_fd:
-            platform_fd.write(platform + '\n')
-            for pt in platform_extra:
-                channel_priority += 5
-                if sdkos:
-                    tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, pt)
-                tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
-                platform_fd.write(tmp)
-
-        # Tell RPM that the "/" directory exist and is available
-        bb.note("configuring RPM system provides")
-        sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
-        bb.utils.mkdirhier(sysinfo_dir)
-        with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
-            dirnames.write("/\n")
-
-        if self.providename:
-            providename_dir = os.path.join(sysinfo_dir, "Providename")
-            if not os.path.exists(providename_dir):
-                providename_content = '\n'.join(self.providename)
-                providename_content += '\n'
-                open(providename_dir, "w+").write(providename_content)
-
-        # Configure RPM... we enforce these settings!
-        bb.note("configuring RPM DB settings")
-        # After change the __db.* cache size, log file will not be
-        # generated automatically, that will raise some warnings,
-        # so touch a bare log for rpm write into it.
-        rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
-        if not os.path.exists(rpmlib_log):
-            bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
-            open(rpmlib_log, 'w+').close()
-
-        DB_CONFIG_CONTENT = "# ================ Environment\n" \
-            "set_data_dir .\n" \
-            "set_create_dir .\n" \
-            "set_lg_dir ./log\n" \
-            "set_tmp_dir ./tmp\n" \
-            "set_flags db_log_autoremove on\n" \
-            "\n" \
-            "# -- thread_count must be >= 8\n" \
-            "set_thread_count 64\n" \
-            "\n" \
-            "# ================ Logging\n" \
-            "\n" \
-            "# ================ Memory Pool\n" \
-            "set_cachesize 0 1048576 0\n" \
-            "set_mp_mmapsize 268435456\n" \
-            "\n" \
-            "# ================ Locking\n" \
-            "set_lk_max_locks 16384\n" \
-            "set_lk_max_lockers 16384\n" \
-            "set_lk_max_objects 16384\n" \
-            "mutex_set_max 163840\n" \
-            "\n" \
-            "# ================ Replication\n"
-
-        db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
-        if not os.path.exists(db_config_dir):
-            open(db_config_dir, 'w+').write(DB_CONFIG_CONTENT)
-
-        # Create database so that smart doesn't complain (lazy init)
-        opt = "-qa"
-        cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
-              self.rpm_cmd, self.target_rootfs, opt)
-        try:
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Create rpm database failed. Command '%s' "
-                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-        # Import GPG key to RPM database of the target system
-        if self.d.getVar('RPM_SIGN_PACKAGES', True) == '1':
-            pubkey_path = self.d.getVar('RPM_GPG_PUBKEY', True)
-            cmd = "%s --root %s --dbpath /var/lib/rpm --import %s > /dev/null" % (
-                  self.rpm_cmd, self.target_rootfs, pubkey_path)
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
-
-        # Configure smart
-        bb.note("configuring Smart settings")
-        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
-                        True)
-        self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
-        self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
-        self._invoke_smart('config --set rpm-extra-macros._var=%s' %
-                           self.d.getVar('localstatedir', True))
-        cmd = "config --set rpm-extra-macros._tmppath=/%s/tmp" % (self.install_dir_name)
-
-        prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
-        if prefer_color:
-            if prefer_color not in ['0', '1', '2', '4']:
-                bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
-                        "\t1: ELF32 wins\n"
-                        "\t2: ELF64 wins\n"
-                        "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
-                        prefer_color)
-            if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
-                                    ['mips64', 'mips64el']:
-                bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
-                         "only.")
-            self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
-                        % prefer_color)
-
-        self._invoke_smart(cmd)
-        self._invoke_smart('config --set rpm-ignoresize=1')
-
-        # Write common configuration for host and target usage
-        self._invoke_smart('config --set rpm-nolinktos=1')
-        self._invoke_smart('config --set rpm-noparentdirs=1')
-        check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
-        if check_signature and check_signature.strip() == "0":
-            self._invoke_smart('config --set rpm-check-signatures=false')
-        for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
-            self._invoke_smart('flag --set ignore-recommends %s' % i)
-
-        # Do the following configurations here, to avoid them being
-        # saved for field upgrade
-        if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
-            self._invoke_smart('config --set ignore-all-recommends=1')
-        pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
-        for i in pkg_exclude.split():
-            self._invoke_smart('flag --set exclude-packages %s' % i)
-
-        # Optional debugging
-        # self._invoke_smart('config --set rpm-log-level=debug')
-        # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
-        # self._invoke_smart(cmd)
-        ch_already_added = []
-        for canonical_arch in platform_extra:
-            arch = canonical_arch.split('-')[0]
-            arch_channel = os.path.join(self.d.getVar('WORKDIR', True), 'rpms', arch)
-            oe.path.remove(arch_channel)
-            deploy_arch_dir = os.path.join(self.deploy_dir, arch)
-            if not os.path.exists(deploy_arch_dir):
-                    continue
-
-            lockfilename = self.d.getVar('DEPLOY_DIR_RPM', True) + "/rpm.lock"
-            lf = bb.utils.lockfile(lockfilename, False)
-            oe.path.copyhardlinktree(deploy_arch_dir, arch_channel)
-            bb.utils.unlockfile(lf)
-
-            if not arch in ch_already_added:
-                bb.note('Adding Smart channel %s (%s)' %
-                        (arch, channel_priority))
-                self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
-                                   % (arch, arch_channel))
-                self._invoke_smart('channel --set %s priority=%d' %
-                                   (arch, channel_priority))
-                channel_priority -= 5
-
-                ch_already_added.append(arch)
-
-        bb.note('adding Smart RPM DB channel')
-        self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
-
-        # Construct install scriptlet wrapper.
-        # Scripts need to be ordered when executed, this ensures numeric order.
-        # If we ever run into needing more the 899 scripts, we'll have to.
-        # change num to start with 1000.
-        #
-        scriptletcmd = "$2 $1/$3 $4\n"
-        scriptpath = "$1/$3"
-
-        # When self.debug_level >= 3, also dump the content of the
-        # executed scriptlets and how they get invoked.  We have to
-        # replace "exit 1" and "ERR" because printing those as-is
-        # would trigger a log analysis failure.
-        if self.debug_level >= 3:
-            dump_invocation = 'echo "Executing ${name} ${kind} with: ' + scriptletcmd + '"\n'
-            dump_script = 'cat ' + scriptpath + '| sed -e "s/exit 1/exxxit 1/g" -e "s/ERR/IRR/g"; echo\n'
-        else:
-            dump_invocation = 'echo "Executing ${name} ${kind}"\n'
-            dump_script = ''
-
-        SCRIPTLET_FORMAT = "#!/bin/bash\n" \
-            "\n" \
-            "export PATH=%s\n" \
-            "export D=%s\n" \
-            'export OFFLINE_ROOT="$D"\n' \
-            'export IPKG_OFFLINE_ROOT="$D"\n' \
-            'export OPKG_OFFLINE_ROOT="$D"\n' \
-            "export INTERCEPT_DIR=%s\n" \
-            "export NATIVE_ROOT=%s\n" \
-            "\n" \
-            "name=`head -1 " + scriptpath + " | cut -d\' \' -f 2`\n" \
-            "kind=`head -1 " + scriptpath + " | cut -d\' \' -f 4`\n" \
-            + dump_invocation \
-            + dump_script \
-            + scriptletcmd + \
-            "ret=$?\n" \
-            "echo Result of ${name} ${kind}: ${ret}\n" \
-            "if [ ${ret} -ne 0 ]; then\n" \
-            "  if [ $4 -eq 1 ]; then\n" \
-            "    mkdir -p $1/etc/rpm-postinsts\n" \
-            "    num=100\n" \
-            "    while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
-            '    echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
-            '    echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
-            "    cat " + scriptpath + " >> $1/etc/rpm-postinsts/${num}-${name}\n" \
-            "    chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
-            '    echo "Info: deferring ${name} ${kind} install scriptlet to first boot"\n' \
-            "  else\n" \
-            '    echo "Error: ${name} ${kind} remove scriptlet failed"\n' \
-            "  fi\n" \
-            "fi\n"
-
-        intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
-        native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
-        scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
-                                                self.target_rootfs,
-                                                intercept_dir,
-                                                native_root)
-        open(self.scriptlet_wrapper, 'w+').write(scriptlet_content)
-
-        bb.note("configuring RPM cross-install scriptlet_wrapper")
-        os.chmod(self.scriptlet_wrapper, 0o755)
-        cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
-              self.scriptlet_wrapper
-        self._invoke_smart(cmd)
-
-        # Debug to show smart config info
-        # bb.note(self._invoke_smart('config --show'))
-
-    def update(self):
-        self._invoke_smart('update rpmsys')
-
-    def get_rdepends_recursively(self, pkgs):
-        # pkgs will be changed during the loop, so use [:] to make a copy.
-        for pkg in pkgs[:]:
-            sub_data = oe.packagedata.read_subpkgdata(pkg, self.d)
-            sub_rdep = sub_data.get("RDEPENDS_" + pkg)
-            if not sub_rdep:
-                continue
-            done = list(bb.utils.explode_dep_versions2(sub_rdep).keys())
-            next = done
-            # Find all the rdepends on dependency chain
-            while next:
-                new = []
-                for sub_pkg in next:
-                    sub_data = oe.packagedata.read_subpkgdata(sub_pkg, self.d)
-                    sub_pkg_rdep = sub_data.get("RDEPENDS_" + sub_pkg)
-                    if not sub_pkg_rdep:
-                        continue
-                    for p in bb.utils.explode_dep_versions2(sub_pkg_rdep):
-                        # Already handled, skip it.
-                        if p in done or p in pkgs:
-                            continue
-                        # It's a new dep
-                        if oe.packagedata.has_subpkgdata(p, self.d):
-                            done.append(p)
-                            new.append(p)
-                next = new
-            pkgs.extend(done)
-        return pkgs
-
-    '''
-    Install pkgs with smart, the pkg name is oe format
-    '''
-    def install(self, pkgs, attempt_only=False):
-
-        if not pkgs:
-            bb.note("There are no packages to install")
-            return
-        bb.note("Installing the following packages: %s" % ' '.join(pkgs))
-        if not attempt_only:
-            # Pull in multilib requires since rpm may not pull in them
-            # correctly, for example,
-            # lib32-packagegroup-core-standalone-sdk-target requires
-            # lib32-libc6, but rpm may pull in libc6 rather than lib32-libc6
-            # since it doesn't know mlprefix (lib32-), bitbake knows it and
-            # can handle it well, find out the RDEPENDS on the chain will
-            # fix the problem. Both do_rootfs and do_populate_sdk have this
-            # issue.
-            # The attempt_only packages don't need this since they are
-            # based on the installed ones.
-            #
-            # Separate pkgs into two lists, one is multilib, the other one
-            # is non-multilib.
-            ml_pkgs = []
-            non_ml_pkgs = pkgs[:]
-            for pkg in pkgs:
-                for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
-                    if pkg.startswith(mlib + '-'):
-                        ml_pkgs.append(pkg)
-                        non_ml_pkgs.remove(pkg)
-
-            if len(ml_pkgs) > 0 and len(non_ml_pkgs) > 0:
-                # Found both foo and lib-foo
-                ml_pkgs = self.get_rdepends_recursively(ml_pkgs)
-                non_ml_pkgs = self.get_rdepends_recursively(non_ml_pkgs)
-                # Longer list makes smart slower, so only keep the pkgs
-                # which have the same BPN, and smart can handle others
-                # correctly.
-                pkgs_new = []
-                for pkg in non_ml_pkgs:
-                    for mlib in (self.d.getVar("MULTILIB_VARIANTS", True) or "").split():
-                        mlib_pkg = mlib + "-" + pkg
-                        if mlib_pkg in ml_pkgs:
-                            pkgs_new.append(pkg)
-                            pkgs_new.append(mlib_pkg)
-                for pkg in pkgs:
-                    if pkg not in pkgs_new:
-                        pkgs_new.append(pkg)
-                pkgs = pkgs_new
-                new_depends = {}
-                deps = bb.utils.explode_dep_versions2(" ".join(pkgs))
-                for depend in deps:
-                    data = oe.packagedata.read_subpkgdata(depend, self.d)
-                    key = "PKG_%s" % depend
-                    if key in data:
-                        new_depend = data[key]
-                    else:
-                        new_depend = depend
-                    new_depends[new_depend] = deps[depend]
-                pkgs = bb.utils.join_deps(new_depends, commasep=True).split(', ')
-        pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
-        if not pkgs:
-            bb.note("There are no packages to install")
-            return
-        if not attempt_only:
-            bb.note('to be installed: %s' % ' '.join(pkgs))
-            cmd = "%s %s install -y %s" % \
-                  (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
-            bb.note(cmd)
-        else:
-            bb.note('installing attempt only packages...')
-            bb.note('Attempting %s' % ' '.join(pkgs))
-            cmd = "%s %s install --attempt -y %s" % \
-                  (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
-        try:
-            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8")
-            bb.note(output)
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Unable to install packages. Command '%s' "
-                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-    '''
-    Remove pkgs with smart, the pkg name is smart/rpm format
-    '''
-    def remove(self, pkgs, with_dependencies=True):
-        bb.note('to be removed: ' + ' '.join(pkgs))
-
-        if not with_dependencies:
-            cmd = "%s -e --nodeps " % self.rpm_cmd
-            cmd += "--root=%s " % self.target_rootfs
-            cmd += "--dbpath=/var/lib/rpm "
-            cmd += "--define='_cross_scriptlet_wrapper %s' " % \
-                   self.scriptlet_wrapper
-            cmd += "--define='_tmppath /%s/tmp' %s" % (self.install_dir_name, ' '.join(pkgs))
-        else:
-            # for pkg in pkgs:
-            #   bb.note('Debug: What required: %s' % pkg)
-            #   bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
-
-            cmd = "%s %s remove -y %s" % (self.smart_cmd,
-                                          self.smart_opt,
-                                          ' '.join(pkgs))
-
-        try:
-            bb.note(cmd)
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
-            bb.note(output)
-        except subprocess.CalledProcessError as e:
-            bb.note("Unable to remove packages. Command '%s' "
-                    "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-    def upgrade(self):
-        bb.note('smart upgrade')
-        self._invoke_smart('upgrade')
+        self._configure_dnf()
+        self._configure_rpm()
 
     def write_index(self):
-        result = self.indexer.write_index()
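+        # Serialise index creation: other tasks may be writing into the
+        # shared RPM deploy directory at the same time.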
+        lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock"
+        lf = bb.utils.lockfile(lockfilename, False)
+        RpmIndexer(self.d, self.rpm_repo_dir).write_index()
+        bb.utils.unlockfile(lf)
 
-        if result is not None:
-            bb.fatal(result)
+    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+        from urllib.parse import urlparse
+
+        if feed_uris == "":
+            return
+
+        bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d"))
+        remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+        for uri in remote_uris:
+            repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/"))
+            if feed_archs is not None:
+                for arch in feed_archs.split():
+                    repo_uri = uri + "/" + arch
+                    repo_id   = "oe-remote-repo"  + "-".join(urlparse(repo_uri).path.split("/"))
+                    repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/"))
+                    open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write(
+                             "[%s]\nname=%s\nbaseurl=%s\n\n" % (repo_id, repo_name, repo_uri))
+            else:
+                repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/"))
+                repo_uri = uri
+                open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write(
+                             "[%s]\nname=%s\nbaseurl=%s\n" % (repo_base, repo_name, repo_uri))
+
+    def _prepare_pkg_transaction(self):
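+        # These variables are read by package scriptlets and the postinstall
+        # intercept hooks when installing into a foreign rootfs.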
+        os.environ['D'] = self.target_rootfs
+        os.environ['OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['INTERCEPT_DIR'] = oe.path.join(self.d.getVar('WORKDIR'),
+                                                   "intercept_scripts")
+        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
+
+
+    def install(self, pkgs, attempt_only=False):
+        if len(pkgs) == 0:
+            return
+        self._prepare_pkg_transaction()
+
+        bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS')
+        package_exclude = self.d.getVar('PACKAGE_EXCLUDE')
+        exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else [])
+
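+        # Map OE policy onto dnf switches: attempt-only installs may skip
+        # unresolvable packages, NO_RECOMMENDATIONS drops weak dependencies,
+        # and GPG verification is only enforced for signed feeds.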
+        output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) +
+                         (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) +
+                         (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) +
+                         (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) +
+                         ["install"] +
+                         pkgs)
+
+        failed_scriptlets_pkgnames = collections.OrderedDict()
+        for line in output.splitlines():
+            if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
+                failed_scriptlets_pkgnames[line.split()[-1]] = True
+
+        for pkg in failed_scriptlets_pkgnames.keys():
+            self.save_rpmpostinst(pkg)
+
+    def remove(self, pkgs, with_dependencies=True):
+        if len(pkgs) == 0:
+            return
+        self._prepare_pkg_transaction()
+
+        if with_dependencies:
+            self._invoke_dnf(["remove"] + pkgs)
+        else:
+            cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+            args = ["-e", "--nodeps", "--root=%s" %self.target_rootfs]
+
+            try:
+                output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8")
+            except subprocess.CalledProcessError as e:
+                bb.fatal("Could not invoke rpm. Command "
+                     "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8")))
+
+    def upgrade(self):
+        self._prepare_pkg_transaction()
+        self._invoke_dnf(["upgrade"])
+
+    def autoremove(self):
+        self._prepare_pkg_transaction()
+        self._invoke_dnf(["autoremove"])
 
     def remove_packaging_data(self):
-        bb.utils.remove(self.image_rpmlib, True)
-        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
-                        True)
-        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
-
-        # remove temp directory
-        bb.utils.remove(self.install_dir_path, True)
+        self._invoke_dnf(["clean", "all"])
+        for dir in self.packaging_data_dirs:
+            bb.utils.remove(oe.path.join(self.target_rootfs, dir), True)
 
     def backup_packaging_data(self):
-        # Save the rpmlib for increment rpm image generation
-        if os.path.exists(self.saved_rpmlib):
-            bb.utils.remove(self.saved_rpmlib, True)
-        shutil.copytree(self.image_rpmlib,
-                        self.saved_rpmlib,
-                        symlinks=True)
+        # Save the packaging dirs for incremental rpm image generation
+        if os.path.exists(self.saved_packaging_data):
+            bb.utils.remove(self.saved_packaging_data, True)
+        for i in self.packaging_data_dirs:
+            source_dir = oe.path.join(self.target_rootfs, i)
+            target_dir = oe.path.join(self.saved_packaging_data, i)
+            shutil.copytree(source_dir, target_dir, symlinks=True)
 
     def recovery_packaging_data(self):
         # Move the saved packaging data back
-        if os.path.exists(self.saved_rpmlib):
-            if os.path.exists(self.image_rpmlib):
-                bb.utils.remove(self.image_rpmlib, True)
-
-            bb.note('Recovery packaging data')
-            shutil.copytree(self.saved_rpmlib,
-                            self.image_rpmlib,
+        if os.path.exists(self.saved_packaging_data):
+            for i in self.packaging_data_dirs:
+                target_dir = oe.path.join(self.target_rootfs, i)
+                if os.path.exists(target_dir):
+                    bb.utils.remove(target_dir, True)
+                source_dir = oe.path.join(self.saved_packaging_data, i)
+                shutil.copytree(source_dir,
+                            target_dir,
                             symlinks=True)
 
     def list_installed(self):
-        return self.pkgs_list.list_pkgs()
+        output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"],
+                                  print_output=False)
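+        # Parse the repoquery output with a small line-oriented state machine:
+        # "Package:" opens a record, subsequent lines are collected as deps or
+        # recommendations, and "DependenciesEndHere:" closes the record.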
+        packages = {}
+        current_package = None
+        current_deps = None
+        current_state = "initial"
+        for line in output.splitlines():
+            if line.startswith("Package:"):
+                package_info = line.split(" ")[1:]
+                current_package = package_info[0]
+                package_arch = package_info[1]
+                package_version = package_info[2]
+                package_rpm = package_info[3]
+                packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm}
+                current_deps = []
+            elif line.startswith("Dependencies:"):
+                current_state = "dependencies"
+            elif line.startswith("Recommendations"):
+                current_state = "recommendations"
+            elif line.startswith("DependenciesEndHere:"):
+                current_state = "initial"
+                packages[current_package]["deps"] = current_deps
+            elif len(line) > 0:
+                if current_state == "dependencies":
+                    current_deps.append(line)
+                elif current_state == "recommendations":
+                    current_deps.append("%s [REC]" % line)
 
-    '''
-    If incremental install, we need to determine what we've got,
-    what we need to add, and what to remove...
-    The dump_install_solution will dump and save the new install
-    solution.
-    '''
+        return packages
+
+    def update(self):
+        self._invoke_dnf(["makecache"])
+
+    def _invoke_dnf(self, dnf_args, fatal=True, print_output=True):
+        os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
+
+        dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
+        standard_dnf_args = (["-v", "--rpmverbosity=debug"] if self.d.getVar('ROOTFS_RPM_DEBUG') else []) + ["-y",
+                             "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
+                             "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
+                             "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
+                             "--installroot=%s" % (self.target_rootfs),
+                             "--setopt=logdir=%s" % (self.d.getVar('T'))
+                            ]
+        cmd = [dnf_cmd] + standard_dnf_args + dnf_args
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
+            if print_output:
+                bb.note(output)
+            return output
+        except subprocess.CalledProcessError as e:
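+            # (bb.note, bb.fatal)[fatal] picks the logger by boolean index,
+            # so best-effort invocations warn while required ones abort.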
+            if print_output:
+                (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+                     "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8")))
+            else:
+                (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command "
+                     "'%s' returned %d:" % (' '.join(cmd), e.returncode))
+            return e.output.decode("utf-8")
+
     def dump_install_solution(self, pkgs):
-        bb.note('creating new install solution for incremental install')
-        if len(pkgs) == 0:
-            return
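+        # The solution manifest is a plain whitespace-separated package list,
+        # read back by load_old_install_solution() for incremental installs.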
+        open(self.solution_manifest, 'w').write(" ".join(pkgs))
+        return pkgs
 
-        pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
-        install_pkgs = list()
-
-        cmd = "%s %s install -y --dump %s 2>%s" %  \
-              (self.smart_cmd,
-               self.smart_opt,
-               ' '.join(pkgs),
-               self.solution_manifest)
-        try:
-            # Disable rpmsys channel for the fake install
-            self._invoke_smart('channel --disable rpmsys')
-
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
-            with open(self.solution_manifest, 'r') as manifest:
-                for pkg in manifest.read().split('\n'):
-                    if '@' in pkg:
-                        install_pkgs.append(pkg)
-        except subprocess.CalledProcessError as e:
-            bb.note("Unable to dump install packages. Command '%s' "
-                    "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-        # Recovery rpmsys channel
-        self._invoke_smart('channel --enable rpmsys')
-        return install_pkgs
-
-    '''
-    If incremental install, we need to determine what we've got,
-    what we need to add, and what to remove...
-    The load_old_install_solution will load the previous install
-    solution
-    '''
     def load_old_install_solution(self):
-        bb.note('load old install solution for incremental install')
-        installed_pkgs = list()
         if not os.path.exists(self.solution_manifest):
-            bb.note('old install solution not exist')
-            return installed_pkgs
+            return []
 
-        with open(self.solution_manifest, 'r') as manifest:
-            for pkg in manifest.read().split('\n'):
-                if '@' in pkg:
-                    installed_pkgs.append(pkg.strip())
+        with open(self.solution_manifest, 'r') as manifest:
+            return manifest.read().split()
 
-        return installed_pkgs
-
-    '''
-    Dump all available packages in feeds, it should be invoked after the
-    newest rpm index was created
-    '''
-    def dump_all_available_pkgs(self):
-        available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
-        available_pkgs = list()
-        cmd = "%s %s query --output %s" %  \
-              (self.smart_cmd, self.smart_opt, available_manifest)
-        try:
-            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
-            with open(available_manifest, 'r') as manifest:
-                for pkg in manifest.read().split('\n'):
-                    if '@' in pkg:
-                        available_pkgs.append(pkg.strip())
-        except subprocess.CalledProcessError as e:
-            bb.note("Unable to list all available packages. Command '%s' "
-                    "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-        self.fullpkglist = available_pkgs
-
-        return
+    def _script_num_prefix(self, path):
+        files = os.listdir(path)
+        numbers = set()
+        numbers.add(99)
+        for f in files:
+            numbers.add(int(f.split("-")[0]))
+        return max(numbers) + 1
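The numbering helper seeds the set with 99 so the first saved script gets
prefix 100; a standalone version of the same logic (note it assumes every
entry in the directory is named "NN-pkgname", otherwise int() raises
ValueError):

    def script_num_prefix(filenames):
        numbers = {99}
        for f in filenames:
            numbers.add(int(f.split("-")[0]))
        return max(numbers) + 1

    print(script_num_prefix([]))                        # 100
    print(script_num_prefix(["100-bash", "101-sed"]))   # 102
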
 
     def save_rpmpostinst(self, pkg):
-        mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS', False) or "").split()
-
-        new_pkg = pkg
-        # Remove any multilib prefix from the package name
-        for mlib in mlibs:
-            if mlib in pkg:
-                new_pkg = pkg.replace(mlib + '-', '')
-                break
-
-        bb.note('  * postponing %s' % new_pkg)
-        saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
-
-        cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
-        cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
-        cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
-        cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
-        cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
+        bb.note("Saving postinstall script of %s" % (pkg))
+        cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+        args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg]
 
         try:
-            bb.note(cmd)
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip().decode("utf-8")
-            bb.note(output)
-            os.chmod(saved_dir, 0o755)
+            output = subprocess.check_output([cmd] + args, stderr=subprocess.STDOUT).decode("utf-8")
         except subprocess.CalledProcessError as e:
-            bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
-                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
+            bb.fatal("Could not invoke rpm. Command "
+                     "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8")))
 
-    '''Write common configuration for target usage'''
-    def rpm_setup_smart_target_config(self):
-        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
-                        True)
+        # Note: the saved script may still need a "#!/bin/sh" shebang prepended before it can run on target
 
-        self._invoke_smart('config --set rpm-nolinktos=1')
-        self._invoke_smart('config --set rpm-noparentdirs=1')
-        for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
-            self._invoke_smart('flag --set ignore-recommends %s' % i)
-        self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
+        target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/'))
+        bb.utils.mkdirhier(target_path)
+        num = self._script_num_prefix(target_path)
+        saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg))
+        with open(saved_script_name, 'w') as script:
+            script.write(output)
+        os.chmod(saved_script_name, 0o755)
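The query above is equivalent to running rpm directly against the image; an
illustrative standalone version ("/rootfs" and "busybox" are placeholder
values; %{postin} expands to the package's postinstall scriptlet, or
"(none)" when there is none):

    import subprocess

    args = ["rpm", "-q", "--root=/rootfs", "--queryformat", "%{postin}", "busybox"]
    script = subprocess.check_output(args, stderr=subprocess.STDOUT).decode("utf-8")
    print(script)
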
 
-    '''
-    The rpm db lock files were produced after invoking rpm to query on
-    build system, and they caused the rpm on target didn't work, so we
-    need to unlock the rpm db by removing the lock files.
-    '''
-    def unlock_rpm_db(self):
-        # Remove rpm db lock files
-        rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
-        for f in rpm_db_locks:
-            bb.utils.remove(f, True)
-
-    """
-    Returns a dictionary with the package info.
-    """
-    def package_info(self, pkg):
-        cmd = "%s %s info --urls %s" % (self.smart_cmd, self.smart_opt, pkg)
-        try:
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
-        except subprocess.CalledProcessError as e:
-            bb.fatal("Unable to list available packages. Command '%s' "
-                     "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8")))
-
-        # Set default values to avoid UnboundLocalError
-        arch = ""
-        ver = ""
-        filename = ""
-
-        #Parse output
-        for line in output.splitlines():
-            line = line.rstrip()
-            if line.startswith("Name:"):
-                pkg = line.split(": ")[1]
-            elif line.startswith("Version:"):
-                tmp_str = line.split(": ")[1]
-                ver, arch = tmp_str.split("@")
-                break
-
-        # Get filename
-        index = re.search("^URLs", output, re.MULTILINE)
-        tmp_str = output[index.end():]
-        for line in tmp_str.splitlines():
-            if "/" in line:
-                line = line.lstrip()
-                filename = line.split(" ")[0]
-                break
-
-        # To have the same data type than other package_info methods
-        filepath = os.path.join(self.deploy_dir, arch, filename)
-        pkg_dict = {}
-        pkg_dict[pkg] = {"arch":arch, "ver":ver, "filename":filename,
-                         "filepath": filepath}
-
-        return pkg_dict
-
-    """
-    Returns the path to a tmpdir where resides the contents of a package.
-
-    Deleting the tmpdir is responsability of the caller.
-
-    """
     def extract(self, pkg):
-        pkg_info = self.package_info(pkg)
-        if not pkg_info:
-            bb.fatal("Unable to get information for package '%s' while "
-                     "trying to extract the package."  % pkg)
-
-        pkg_path = pkg_info[pkg]["filepath"]
+        output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg])
+        pkg_name = output.splitlines()[-1]
+        if not pkg_name.endswith(".rpm"):
+            bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output))
+        pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name)
 
         cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio")
         rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio")
@@ -1548,20 +825,24 @@
         tmp_dir = tempfile.mkdtemp()
         current_dir = os.getcwd()
         os.chdir(tmp_dir)
+        if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
+            data_tar = 'data.tar.xz'
+        else:
+            data_tar = 'data.tar.gz'
 
         try:
-            cmd = "%s x %s" % (ar_cmd, pkg_path)
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
-            cmd = "%s xf data.tar.*" % tar_cmd
-            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+            cmd = [ar_cmd, 'x', pkg_path]
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+            cmd = [tar_cmd, 'xf', data_tar]
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
             bb.utils.remove(tmp_dir, recurse=True)
             bb.fatal("Unable to extract %s package. Command '%s' "
-                     "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8")))
+                     "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8")))
         except OSError as e:
             bb.utils.remove(tmp_dir, recurse=True)
             bb.fatal("Unable to extract %s package. Command '%s' "
-                     "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename))
+                     "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename))
 
         bb.note("Extracted %s to %s" % (pkg_path, tmp_dir))
         bb.utils.remove(os.path.join(tmp_dir, "debian-binary"))
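A manual equivalent of the extraction sequence above, as a hedged sketch
("/path/to/pkg.deb" is a placeholder, and cwd= replaces the os.chdir dance):
a .deb is an ar archive holding debian-binary, control.tar.* and data.tar.*,
and only the data member is unpacked into the temporary directory:

    import os
    import subprocess
    import tempfile

    tmp_dir = tempfile.mkdtemp()
    subprocess.check_output(["ar", "x", "/path/to/pkg.deb"], cwd=tmp_dir)
    subprocess.check_output(["tar", "xf", "data.tar.xz"], cwd=tmp_dir)
    os.remove(os.path.join(tmp_dir, "debian-binary"))
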
@@ -1580,13 +861,13 @@
         self.pkg_archs = archs
         self.task_name = task_name
 
-        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
+        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK")
         self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
         self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
         self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs)
-        self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+        self.opkg_args += self.d.getVar("OPKG_ARGS")
 
-        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
+        opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
         if opkg_lib_dir[0] == "/":
             opkg_lib_dir = opkg_lib_dir[1:]
 
@@ -1598,7 +879,7 @@
         if not os.path.exists(self.d.expand('${T}/saved')):
             bb.utils.mkdirhier(self.d.expand('${T}/saved'))
 
-        self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") == "1"
+        self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1"
         if self.from_feeds:
             self._create_custom_config()
         else:
@@ -1643,7 +924,7 @@
                 config_file.write("arch %s %d\n" % (arch, priority))
                 priority += 5
 
-            for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
+            for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
                 feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
 
                 if feed_match is not None:
@@ -1660,29 +941,29 @@
             specified as compatible for the current machine.
             NOTE: Development-helper feature, NOT a full-fledged feed.
             """
-            if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
+            if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "":
                 for arch in self.pkg_archs.split():
                     cfg_file_name = os.path.join(self.target_rootfs,
-                                                 self.d.getVar("sysconfdir", True),
+                                                 self.d.getVar("sysconfdir"),
                                                  "opkg",
                                                  "local-%s-feed.conf" % arch)
 
                     with open(cfg_file_name, "w+") as cfg_file:
                         cfg_file.write("src/gz local-%s %s/%s" %
                                        (arch,
-                                        self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
+                                        self.d.getVar('FEED_DEPLOYDIR_BASE_URI'),
                                         arch))
 
-                        if self.d.getVar('OPKGLIBDIR', True) != '/var/lib':
+                        if self.d.getVar('OPKGLIBDIR') != '/var/lib':
                             # There is no command line option for this anymore, we need to add
                             # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
                             # the default value of "/var/lib" as defined in opkg:
                             # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR     VARDIR "/lib/opkg/lists"
                             # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR      VARDIR "/lib/opkg/info"
                             # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE   VARDIR "/lib/opkg/status"
-                            cfg_file.write("option info_dir     %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
-                            cfg_file.write("option lists_dir    %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'lists'))
-                            cfg_file.write("option status_file  %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+                            cfg_file.write("option info_dir     %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+                            cfg_file.write("option lists_dir    %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+                            cfg_file.write("option status_file  %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
 
 
     def _create_config(self):
@@ -1700,33 +981,33 @@
                     config_file.write("src oe-%s file:%s\n" %
                                       (arch, pkgs_dir))
 
-            if self.d.getVar('OPKGLIBDIR', True) != '/var/lib':
+            if self.d.getVar('OPKGLIBDIR') != '/var/lib':
                 # There is no command line option for this anymore, we need to add
                 # info_dir and status_file to config file, if OPKGLIBDIR doesn't have
                 # the default value of "/var/lib" as defined in opkg:
                 # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR     VARDIR "/lib/opkg/lists"
                 # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR      VARDIR "/lib/opkg/info"
                 # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE   VARDIR "/lib/opkg/status"
-                config_file.write("option info_dir     %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'info'))
-                config_file.write("option lists_dir    %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'lists'))
-                config_file.write("option status_file  %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR', True), 'opkg', 'status'))
+                config_file.write("option info_dir     %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info'))
+                config_file.write("option lists_dir    %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists'))
+                config_file.write("option status_file  %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status'))
 
-    def insert_feeds_uris(self):
-        if self.feed_uris == "":
+    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+        if feed_uris == "":
             return
 
         rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf'
                                   % self.target_rootfs)
 
-        feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
-        archs = self.pkg_archs.split() if self.feed_archs is None else self.feed_archs.split()
+        feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
+        archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split()
 
         with open(rootfs_config, "w+") as config_file:
             uri_iterator = 0
             for uri in feed_uris:
                 if archs:
                     for arch in archs:
-                        if (self.feed_archs is None) and (not os.path.exists(os.path.join(self.deploy_dir, arch))):
+                        if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))):
                             continue
                         bb.note('Adding opkg feed url-%s-%d (%s)' %
                             (arch, uri_iterator, uri))
@@ -1764,9 +1045,9 @@
         os.environ['OFFLINE_ROOT'] = self.target_rootfs
         os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
         os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
-        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
+        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
                                                    "intercept_scripts")
-        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
 
         try:
             bb.note("Installing the following packages: %s" % ' '.join(pkgs))
@@ -1817,7 +1098,7 @@
         return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
 
     def handle_bad_recommendations(self):
-        bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
+        bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
         if bad_recommendations.strip() == "":
             return
 
@@ -1871,7 +1152,7 @@
         bb.utils.mkdirhier(temp_opkg_dir)
 
         opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
-        opkg_args += self.d.getVar("OPKG_ARGS", True)
+        opkg_args += self.d.getVar("OPKG_ARGS")
 
         cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
         try:
@@ -1947,7 +1228,7 @@
     def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
         super(DpkgPM, self).__init__(d)
         self.target_rootfs = target_rootfs
-        self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
+        self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB')
         if apt_conf_dir is None:
             self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
         else:
@@ -1956,10 +1237,10 @@
         self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
         self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache")
 
-        self.apt_args = d.getVar("APT_ARGS", True)
+        self.apt_args = d.getVar("APT_ARGS")
 
         self.all_arch_list = archs.split()
-        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").split()
+        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split()
         self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
 
         self._create_configs(archs, base_archs)
@@ -2000,7 +1281,10 @@
     """
     def run_pre_post_installs(self, package_name=None):
         info_dir = self.target_rootfs + "/var/lib/dpkg/info"
-        suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
+        ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"])
+        control_scripts = [
+                ControlScript(".preinst", "Preinstall", "install"),
+                ControlScript(".postinst", "Postinstall", "configure")]
         status_file = self.target_rootfs + "/var/lib/dpkg/status"
         installed_pkgs = []
 
@@ -2017,22 +1301,25 @@
         os.environ['OFFLINE_ROOT'] = self.target_rootfs
         os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
         os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
-        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
+        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR'),
                                                    "intercept_scripts")
-        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE')
 
         failed_pkgs = []
         for pkg_name in installed_pkgs:
-            for suffix in suffixes:
-                p_full = os.path.join(info_dir, pkg_name + suffix[0])
+            for control_script in control_scripts:
+                p_full = os.path.join(info_dir, pkg_name + control_script.suffix)
                 if os.path.exists(p_full):
                     try:
                         bb.note("Executing %s for package: %s ..." %
-                                 (suffix[1].lower(), pkg_name))
-                        subprocess.check_output(p_full, stderr=subprocess.STDOUT)
+                                 (control_script.name.lower(), pkg_name))
+                        output = subprocess.check_output([p_full, control_script.argument],
+                                stderr=subprocess.STDOUT).decode("utf-8")
+                        bb.note(output)
                     except subprocess.CalledProcessError as e:
                         bb.note("%s for package %s failed with %d:\n%s" %
-                                (suffix[1], pkg_name, e.returncode, e.output.decode("utf-8")))
+                                (control_script.name, pkg_name, e.returncode,
+                                    e.output.decode("utf-8")))
                         failed_pkgs.append(pkg_name)
                         break
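The namedtuple refactor also starts passing the maintainer-script argument
that dpkg itself would use ("install" for preinst, "configure" for
postinst). In brief:

    import collections

    ControlScript = collections.namedtuple("ControlScript",
                                           ["suffix", "name", "argument"])
    control_scripts = [
        ControlScript(".preinst", "Preinstall", "install"),
        ControlScript(".postinst", "Postinstall", "configure")]

    for cs in control_scripts:
        # e.g. /var/lib/dpkg/info/<pkg>.preinst is run as "<script> install"
        print("%s scripts run with argument %r" % (cs.name, cs.argument))
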
 
@@ -2112,23 +1399,23 @@
         if result is not None:
             bb.fatal(result)
 
-    def insert_feeds_uris(self):
-        if self.feed_uris == "":
+    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
+        if feed_uris == "":
             return
 
         sources_conf = os.path.join("%s/etc/apt/sources.list"
                                     % self.target_rootfs)
         arch_list = []
 
-        if self.feed_archs is None:
+        if feed_archs is None:
             for arch in self.all_arch_list:
                 if not os.path.exists(os.path.join(self.deploy_dir, arch)):
                     continue
                 arch_list.append(arch)
         else:
-            arch_list = self.feed_archs.split()
+            arch_list = feed_archs.split()
 
-        feed_uris = self.construct_uris(self.feed_uris.split(), self.feed_base_paths.split())
+        feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split())
 
         with open(sources_conf, "w+") as sources_file:
             for uri in feed_uris:
@@ -2168,7 +1455,7 @@
 
                 priority += 5
 
-            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or ""
             for pkg in pkg_exclude.split():
                 prefs_file.write(
                     "Package: %s\n"
@@ -2183,14 +1470,13 @@
                                    os.path.join(self.deploy_dir, arch))
 
         base_arch_list = base_archs.split()
-        multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
+        multilib_variants = self.d.getVar("MULTILIB_VARIANTS")
         for variant in multilib_variants.split():
             localdata = bb.data.createCopy(self.d)
             variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False)
-            orig_arch = localdata.getVar("DPKG_ARCH", True)
+            orig_arch = localdata.getVar("DPKG_ARCH")
             localdata.setVar("DEFAULTTUNE", variant_tune)
-            bb.data.update_data(localdata)
-            variant_arch = localdata.getVar("DPKG_ARCH", True)
+            variant_arch = localdata.getVar("DPKG_ARCH")
             if variant_arch not in base_arch_list:
                 base_arch_list.append(variant_arch)
 
@@ -2221,7 +1507,7 @@
 
     def remove_packaging_data(self):
         bb.utils.remove(os.path.join(self.target_rootfs,
-                                     self.d.getVar('opkglibdir', True)), True)
+                                     self.d.getVar('opkglibdir')), True)
         bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
 
     def fix_broken_dependencies(self):
@@ -2269,12 +1555,12 @@
         return tmp_dir
 
 def generate_index_files(d):
-    classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
+    classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()
 
     indexer_map = {
-        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
-        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
-        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
+        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM')),
+        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
+        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
     }
 
     result = None
diff --git a/import-layers/yocto-poky/meta/lib/oe/packagedata.py b/import-layers/yocto-poky/meta/lib/oe/packagedata.py
index 21d4de9..32e5c82 100644
--- a/import-layers/yocto-poky/meta/lib/oe/packagedata.py
+++ b/import-layers/yocto-poky/meta/lib/oe/packagedata.py
@@ -57,7 +57,7 @@
 def _pkgmap(d):
     """Return a dictionary mapping package to recipe name."""
 
-    pkgdatadir = d.getVar("PKGDATA_DIR", True)
+    pkgdatadir = d.getVar("PKGDATA_DIR")
 
     pkgmap = {}
     try:
diff --git a/import-layers/yocto-poky/meta/lib/oe/packagegroup.py b/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
index 9781927..4bc5d3e 100644
--- a/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
+++ b/import-layers/yocto-poky/meta/lib/oe/packagegroup.py
@@ -1,17 +1,17 @@
 import itertools
 
 def is_optional(feature, d):
-    packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+    packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
     if packages:
-        return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional", True))
+        return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
     else:
-        return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional", True))
+        return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
 
 def packages(features, d):
     for feature in features:
-        packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+        packages = d.getVar("FEATURE_PACKAGES_%s" % feature)
         if not packages:
-            packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
+            packages = d.getVar("PACKAGE_GROUP_%s" % feature)
         for pkg in (packages or "").split():
             yield pkg
 
diff --git a/import-layers/yocto-poky/meta/lib/oe/patch.py b/import-layers/yocto-poky/meta/lib/oe/patch.py
index 0332f10..f1ab3dd 100644
--- a/import-layers/yocto-poky/meta/lib/oe/patch.py
+++ b/import-layers/yocto-poky/meta/lib/oe/patch.py
@@ -81,7 +81,7 @@
                 patch[param] = PatchSet.defaults[param]
 
         if patch.get("remote"):
-            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
+            patch["file"] = self.d.expand(bb.fetch2.localpath(patch["remote"], self.d))
 
         patch["filemd5"] = bb.utils.md5_file(patch["file"])
 
@@ -281,8 +281,8 @@
 
     def __init__(self, dir, d):
         PatchTree.__init__(self, dir, d)
-        self.commituser = d.getVar('PATCH_GIT_USER_NAME', True)
-        self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL', True)
+        self.commituser = d.getVar('PATCH_GIT_USER_NAME')
+        self.commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
 
     @staticmethod
     def extractPatchHeader(patchfile):
@@ -371,8 +371,8 @@
     @staticmethod
     def gitCommandUserOptions(cmd, commituser=None, commitemail=None, d=None):
         if d:
-            commituser = d.getVar('PATCH_GIT_USER_NAME', True)
-            commitemail = d.getVar('PATCH_GIT_USER_EMAIL', True)
+            commituser = d.getVar('PATCH_GIT_USER_NAME')
+            commitemail = d.getVar('PATCH_GIT_USER_EMAIL')
         if commituser:
             cmd += ['-c', 'user.name="%s"' % commituser]
         if commitemail:
@@ -428,6 +428,7 @@
     def extractPatches(tree, startcommit, outdir, paths=None):
         import tempfile
         import shutil
+        import re
         tempdir = tempfile.mkdtemp(prefix='oepatch')
         try:
             shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
@@ -443,10 +444,13 @@
                         try:
                             with open(srcfile, 'r', encoding=encoding) as f:
                                 for line in f:
-                                    if line.startswith(GitApplyTree.patch_line_prefix):
+                                    checkline = line
+                                    if checkline.startswith('Subject: '):
+                                        checkline = re.sub(r'\[.+?\]\s*', '', checkline[9:])
+                                    if checkline.startswith(GitApplyTree.patch_line_prefix):
                                         outfile = line.split()[-1].strip()
                                         continue
-                                    if line.startswith(GitApplyTree.ignore_commit_prefix):
+                                    if checkline.startswith(GitApplyTree.ignore_commit_prefix):
                                         continue
                                     patchlines.append(line)
                         except UnicodeDecodeError:
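The new checkline handling strips bracketed tags such as "[PATCH 3/7]" that
git format-patch prepends to the Subject line, so the marker check still
matches; in isolation ("@@MARKER@@" stands in for the class's
patch_line_prefix value, which is defined outside this hunk):

    import re

    line = "Subject: [PATCH 3/7] @@MARKER@@ fix-foo.patch"
    # "Subject: " is 9 characters; the bracketed tag is removed first
    print(re.sub(r'\[.+?\]\s*', '', line[9:]))   # "@@MARKER@@ fix-foo.patch"
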
@@ -547,7 +551,7 @@
 
 class QuiltTree(PatchSet):
     def _runcmd(self, args, run = True):
-        quiltrc = self.d.getVar('QUILTRCFILE', True)
+        quiltrc = self.d.getVar('QUILTRCFILE')
         if not run:
             return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
         runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)
@@ -723,7 +727,7 @@
             # Patch application failed
             patchcmd = self.patchset.Push(True, False, False)
 
-            t = self.patchset.d.getVar('T', True)
+            t = self.patchset.d.getVar('T')
             if not t:
                 bb.msg.fatal("Build", "T not set")
             bb.utils.mkdirhier(t)
@@ -765,3 +769,110 @@
             os.chdir(olddir)
             raise
         os.chdir(olddir)
+
+
+def patch_path(url, fetch, workdir, expand=True):
+    """Return the local path of a patch, or None if this isn't a patch"""
+
+    local = fetch.localpath(url)
+    base, ext = os.path.splitext(os.path.basename(local))
+    if ext in ('.gz', '.bz2', '.xz', '.Z'):
+        if expand:
+            local = os.path.join(workdir, base)
+        ext = os.path.splitext(base)[1]
+
+    urldata = fetch.ud[url]
+    if "apply" in urldata.parm:
+        apply = oe.types.boolean(urldata.parm["apply"])
+        if not apply:
+            return
+    elif ext not in (".diff", ".patch"):
+        return
+
+    return local
+
+def src_patches(d, all=False, expand=True):
+    workdir = d.getVar('WORKDIR')
+    fetch = bb.fetch2.Fetch([], d)
+    patches = []
+    sources = []
+    for url in fetch.urls:
+        local = patch_path(url, fetch, workdir, expand)
+        if not local:
+            if all:
+                local = fetch.localpath(url)
+                sources.append(local)
+            continue
+
+        urldata = fetch.ud[url]
+        parm = urldata.parm
+        patchname = parm.get('pname') or os.path.basename(local)
+
+        apply, reason = should_apply(parm, d)
+        if not apply:
+            if reason:
+                bb.note("Patch %s %s" % (patchname, reason))
+            continue
+
+        patchparm = {'patchname': patchname}
+        if "striplevel" in parm:
+            striplevel = parm["striplevel"]
+        elif "pnum" in parm:
+            #bb.msg.warn(None, "Deprecated usage of 'pnum' url parameter in '%s', please use 'striplevel'" % url)
+            striplevel = parm["pnum"]
+        else:
+            striplevel = '1'
+        patchparm['striplevel'] = striplevel
+
+        patchdir = parm.get('patchdir')
+        if patchdir:
+            patchparm['patchdir'] = patchdir
+
+        localurl = bb.fetch.encodeurl(('file', '', local, '', '', patchparm))
+        patches.append(localurl)
+
+    if all:
+        return sources
+
+    return patches
+
+
+def should_apply(parm, d):
+    if "mindate" in parm or "maxdate" in parm:
+        pn = d.getVar('PN')
+        srcdate = d.getVar('SRCDATE_%s' % pn)
+        if not srcdate:
+            srcdate = d.getVar('SRCDATE')
+
+        if srcdate == "now":
+            srcdate = d.getVar('DATE')
+
+        if "maxdate" in parm and parm["maxdate"] < srcdate:
+            return False, 'is outdated'
+
+        if "mindate" in parm and parm["mindate"] > srcdate:
+            return False, 'is predated'
+
+    if "minrev" in parm:
+        srcrev = d.getVar('SRCREV')
+        if srcrev and srcrev < parm["minrev"]:
+            return False, 'applies to later revisions'
+
+    if "maxrev" in parm:
+        srcrev = d.getVar('SRCREV')
+        if srcrev and srcrev > parm["maxrev"]:
+            return False, 'applies to earlier revisions'
+
+    if "rev" in parm:
+        srcrev = d.getVar('SRCREV')
+        if srcrev and parm["rev"] not in srcrev:
+            return False, "doesn't apply to revision"
+
+    if "notrev" in parm:
+        srcrev = d.getVar('SRCREV')
+        if srcrev and parm["notrev"] in srcrev:
+            return False, "doesn't apply to revision"
+
+    return True, None
+
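A standalone sketch of the revision gating in should_apply; parm stands in
for SRC_URI parameters such as file://fix.patch;minrev=5;notrev=34bf21a
(plain string comparison, exactly as above):

    def gate(parm, srcrev):
        if "minrev" in parm and srcrev and srcrev < parm["minrev"]:
            return False, 'applies to later revisions'
        if "notrev" in parm and srcrev and parm["notrev"] in srcrev:
            return False, "doesn't apply to revision"
        return True, None

    print(gate({"minrev": "5"}, "3"))            # (False, 'applies to later revisions')
    print(gate({"notrev": "34bf"}, "34bf21a"))   # (False, "doesn't apply to revision")
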
diff --git a/import-layers/yocto-poky/meta/lib/oe/path.py b/import-layers/yocto-poky/meta/lib/oe/path.py
index ed7fd1e..448a2b9 100644
--- a/import-layers/yocto-poky/meta/lib/oe/path.py
+++ b/import-layers/yocto-poky/meta/lib/oe/path.py
@@ -50,9 +50,30 @@
     os.remove(path)
     os.symlink(base, path)
 
+def replace_absolute_symlinks(basedir, d):
+    """
+    Walk basedir looking for absolute symlinks and replace them with relative ones.
+    The absolute link targets are assumed to be paths relative to basedir
+    (unlike make_relative_symlink above, which tries to compute common ancestors
+    using pattern matching instead).
+    """
+    for walkroot, dirs, files in os.walk(basedir):
+        for file in files + dirs:
+            path = os.path.join(walkroot, file)
+            if not os.path.islink(path):
+                continue
+            link = os.readlink(path)
+            if not os.path.isabs(link):
+                continue
+            walkdir = os.path.dirname(path.rpartition(basedir)[2])
+            base = os.path.relpath(link, walkdir)
+            bb.debug(2, "Replacing absolute path %s with relative path %s" % (link, base))
+            os.remove(path)
+            os.symlink(base, path)
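A minimal demonstration of the path arithmetic above, self-contained in a
temporary tree rather than a real rootfs: an absolute target
/usr/bin/busybox linked at <base>/bin/sh becomes ../usr/bin/busybox:

    import os
    import tempfile

    base = tempfile.mkdtemp()
    os.makedirs(os.path.join(base, "usr/bin"))
    os.makedirs(os.path.join(base, "bin"))
    open(os.path.join(base, "usr/bin/busybox"), "w").close()
    # Absolute target, interpreted as relative to the rootfs being assembled:
    os.symlink("/usr/bin/busybox", os.path.join(base, "bin/sh"))

    path = os.path.join(base, "bin/sh")
    link = os.readlink(path)                             # "/usr/bin/busybox"
    walkdir = os.path.dirname(path.rpartition(base)[2])  # "/bin"
    print(os.path.relpath(link, walkdir))                # "../usr/bin/busybox"
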
+
 def format_display(path, metadata):
     """ Prepare a path for display to the user. """
-    rel = relative(metadata.getVar("TOPDIR", True), path)
+    rel = relative(metadata.getVar("TOPDIR"), path)
     if len(rel) > len(path):
         return path
     else:
@@ -81,7 +102,6 @@
         subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
         source = ''
         if os.path.isdir(src):
-            import glob
             if len(glob.glob('%s/.??*' % src)) > 0:
                 source = './.??* '
             source += './*'
@@ -95,7 +115,14 @@
         copytree(src, dst)
 
 def remove(path, recurse=True):
-    """Equivalent to rm -f or rm -rf"""
+    """
+    Equivalent to rm -f or rm -rf
+    NOTE: be careful about passing paths that may contain filenames with
+    wildcards in them (as opposed to passing an actual wildcarded path) -
+    since we use glob.glob() to expand the path. Filenames containing
+    square brackets are particularly problematic since they may not
+    actually expand to match the original filename.
+    """
     for name in glob.glob(path):
         try:
             os.unlink(name)
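The bracket caveat in the docstring can be shown directly; glob treats
square brackets as character classes, and glob.escape (Python 3.4+) is the
usual workaround when a literal filename must survive the expansion:

    import glob
    import os
    import tempfile

    d = tempfile.mkdtemp()
    name = os.path.join(d, "lib[abi1].so")
    open(name, "w").close()
    print(glob.glob(name))               # [] -- "[abi1]" parsed as a character class
    print(glob.glob(glob.escape(name)))  # ['.../lib[abi1].so']
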
diff --git a/import-layers/yocto-poky/meta/lib/oe/prservice.py b/import-layers/yocto-poky/meta/lib/oe/prservice.py
index 0054f95..32dfc15 100644
--- a/import-layers/yocto-poky/meta/lib/oe/prservice.py
+++ b/import-layers/yocto-poky/meta/lib/oe/prservice.py
@@ -1,7 +1,7 @@
 
 def prserv_make_conn(d, check = False):
     import prserv.serv
-    host_params = list([_f for _f in (d.getVar("PRSERV_HOST", True) or '').split(':') if _f])
+    host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
     try:
         conn = None
         conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
@@ -15,11 +15,11 @@
     return conn
 
 def prserv_dump_db(d):
-    if not d.getVar('PRSERV_HOST', True):
+    if not d.getVar('PRSERV_HOST'):
         bb.error("Not using network based PR service")
         return None
 
-    conn = d.getVar("__PRSERV_CONN", True)
+    conn = d.getVar("__PRSERV_CONN")
     if conn is None:
         conn = prserv_make_conn(d)
         if conn is None:
@@ -27,18 +27,18 @@
             return None
 
     #dump db
-    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
-    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
-    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
-    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
+    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION')
+    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH')
+    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM')
+    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL'))
     return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
 
 def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
-    if not d.getVar('PRSERV_HOST', True):
+    if not d.getVar('PRSERV_HOST'):
         bb.error("Not using network based PR service")
         return None
 
-    conn = d.getVar("__PRSERV_CONN", True)
+    conn = d.getVar("__PRSERV_CONN")
     if conn is None:
         conn = prserv_make_conn(d)
         if conn is None:
@@ -58,7 +58,7 @@
                (filter_checksum and filter_checksum != checksum):
                continue
             try:
-                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
+                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum))
             except BaseException as exc:
                 bb.debug("Not valid value of %s:%s" % (v,str(exc)))
                 continue
@@ -72,8 +72,8 @@
 def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
     import bb.utils
     #initialize the output file
-    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
-    df = d.getVar('PRSERV_DUMPFILE', True)
+    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR'))
+    df = d.getVar('PRSERV_DUMPFILE')
     #write data
     lf = bb.utils.lockfile("%s.lock" % df)
     f = open(df, "a")
@@ -114,7 +114,7 @@
     bb.utils.unlockfile(lf)
 
 def prserv_check_avail(d):
-    host_params = list([_f for _f in (d.getVar("PRSERV_HOST", True) or '').split(':') if _f])
+    host_params = list([_f for _f in (d.getVar("PRSERV_HOST") or '').split(':') if _f])
     try:
         if len(host_params) != 2:
             raise TypeError
diff --git a/import-layers/yocto-poky/meta/lib/oe/qa.py b/import-layers/yocto-poky/meta/lib/oe/qa.py
index 22d76dc..3231e60 100644
--- a/import-layers/yocto-poky/meta/lib/oe/qa.py
+++ b/import-layers/yocto-poky/meta/lib/oe/qa.py
@@ -129,11 +129,11 @@
         if cmd in self.objdump_output:
             return self.objdump_output[cmd]
 
-        objdump = d.getVar('OBJDUMP', True)
+        objdump = d.getVar('OBJDUMP')
 
         env = os.environ.copy()
         env["LC_ALL"] = "C"
-        env["PATH"] = d.getVar('PATH', True)
+        env["PATH"] = d.getVar('PATH')
 
         try:
             bb.note("%s %s %s" % (objdump, cmd, self.name))
diff --git a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
index 58e4028..a7fdd36 100644
--- a/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/recipeutils.py
@@ -29,18 +29,9 @@
 
 def pn_to_recipe(cooker, pn, mc=''):
     """Convert a recipe name (PN) to the path to the recipe file"""
-    import bb.providers
 
-    if pn in cooker.recipecaches[mc].pkg_pn:
-        best = bb.providers.findBestProvider(pn, cooker.data, cooker.recipecaches[mc], cooker.recipecaches[mc].pkg_pn)
-        return best[3]
-    elif pn in cooker.recipecaches[mc].providers:
-        filenames = cooker.recipecaches[mc].providers[pn]
-        eligible, foundUnique = bb.providers.filterProviders(filenames, pn, cooker.expanded_data, cooker.recipecaches[mc])
-        filename = eligible[0]
-        return filename
-    else:
-        return None
+    best = cooker.findBestProvider(pn, mc)
+    return best[3]
 
 
 def get_unavailable_reasons(cooker, pn):
@@ -61,28 +52,6 @@
     return envdata
 
 
-def parse_recipe_simple(cooker, pn, d, appends=True):
-    """
-    Parse a recipe and optionally all bbappends that apply to it
-    in the current configuration.
-    """
-    import bb.providers
-
-    recipefile = pn_to_recipe(cooker, pn)
-    if not recipefile:
-        skipreasons = get_unavailable_reasons(cooker, pn)
-        # We may as well re-use bb.providers.NoProvider here
-        if skipreasons:
-            raise bb.providers.NoProvider(skipreasons)
-        else:
-            raise bb.providers.NoProvider('Unable to find any recipe file matching %s' % pn)
-    if appends:
-        appendfiles = cooker.collection.get_file_appends(recipefile)
-    else:
-        appendfiles = None
-    return parse_recipe(cooker, recipefile, appendfiles)
-
-
 def get_var_files(fn, varlist, d):
     """Find the file in which each of a list of variables is set.
     Note: requires variable history to be enabled when parsing.
@@ -359,16 +328,16 @@
 
     # FIXME need a warning if the unexpanded SRC_URI value contains variable references
 
-    uris = (d.getVar('SRC_URI', True) or "").split()
+    uris = (d.getVar('SRC_URI') or "").split()
     fetch = bb.fetch2.Fetch(uris, d)
     if download:
         fetch.download()
 
     # Copy local files to target directory and gather any remote files
-    bb_dir = os.path.dirname(d.getVar('FILE', True)) + os.sep
+    bb_dir = os.path.dirname(d.getVar('FILE')) + os.sep
     remotes = []
     copied = []
-    includes = [path for path in d.getVar('BBINCLUDED', True).split() if
+    includes = [path for path in d.getVar('BBINCLUDED').split() if
                 path.startswith(bb_dir) and os.path.exists(path)]
     for path in fetch.localpaths() + includes:
         # Only import files that are under the meta directory
@@ -389,15 +358,21 @@
     return copied, remotes
 
 
-def get_recipe_local_files(d, patches=False):
+def get_recipe_local_files(d, patches=False, archives=False):
     """Get a list of local files in SRC_URI within a recipe."""
-    uris = (d.getVar('SRC_URI', True) or "").split()
+    import oe.patch
+    uris = (d.getVar('SRC_URI') or "").split()
     fetch = bb.fetch2.Fetch(uris, d)
+    # FIXME this list should be factored out somewhere else (such as the
+    # fetcher) though note that this only encompasses actual container
+    # formats, i.e. those that can contain multiple files, as opposed to
+    # formats that only hold a compressed stream (e.g. .tar.gz as opposed
+    # to just .gz)
+    archive_exts = ['.tar', '.tgz', '.tar.gz', '.tar.Z', '.tbz', '.tbz2', '.tar.bz2', '.tar.xz', '.tar.lz', '.zip', '.jar', '.rpm', '.srpm', '.deb', '.ipk', '.tar.7z', '.7z']
     ret = {}
     for uri in uris:
         if fetch.ud[uri].type == 'file':
             if (not patches and
-                    bb.utils.exec_flat_python_func('patch_path', uri, fetch, '')):
+                    oe.patch.patch_path(uri, fetch, '', expand=False)):
                 continue
             # Skip files that are referenced by absolute path
             fname = fetch.ud[uri].basepath
@@ -409,16 +384,22 @@
                 if os.path.isabs(subdir):
                     continue
                 fname = os.path.join(subdir, fname)
-            ret[fname] = fetch.localpath(uri)
+            localpath = fetch.localpath(uri)
+            if not archives:
+                # Ignore archives that will be unpacked
+                if localpath.endswith(tuple(archive_exts)):
+                    unpack = fetch.ud[uri].parm.get('unpack', True)
+                    if unpack:
+                        continue
+            ret[fname] = localpath
     return ret
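A simplified sketch of the skip rule above: a local archive is dropped from
the result because it will be unpacked, while an SRC_URI entry carrying
";unpack=0" keeps the archive itself as the artifact (the real code reads
the unpack parameter from the fetcher's URL data):

    archive_exts = ('.tar', '.tar.gz', '.tgz', '.zip')   # abbreviated list

    def keep(localpath, parm):
        will_unpack = parm.get('unpack', '1') != '0'
        return not (localpath.endswith(archive_exts) and will_unpack)

    print(keep("vendor.tar.gz", {}))              # False -- unpacked, so skipped
    print(keep("firmware.zip", {'unpack': '0'}))  # True  -- kept as-is
    print(keep("defconfig", {}))                  # True  -- not an archive
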
 
 
 def get_recipe_patches(d):
     """Get a list of the patches included in SRC_URI within a recipe."""
+    import oe.patch
+    patches = oe.patch.src_patches(d, expand=False)
     patchfiles = []
-    # Execute src_patches() defined in patch.bbclass - this works since that class
-    # is inherited globally
-    patches = bb.utils.exec_flat_python_func('src_patches', d)
     for patch in patches:
         _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
         patchfiles.append(local)
@@ -435,14 +416,12 @@
         change mode ('A' for add, 'D' for delete or 'M' for modify)
     """
     import oe.patch
-    # Execute src_patches() defined in patch.bbclass - this works since that class
-    # is inherited globally
-    patches = bb.utils.exec_flat_python_func('src_patches', d)
+    patches = oe.patch.src_patches(d, expand=False)
     patchedfiles = {}
     for patch in patches:
         _, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
         striplevel = int(parm['striplevel'])
-        patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S', True), parm.get('patchdir', '')))
+        patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S'), parm.get('patchdir', '')))
     return patchedfiles
 
 
@@ -480,9 +459,9 @@
     confdata.setVar('LAYERDIR', destlayerdir)
     destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
     confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
-    pn = d.getVar('PN', True)
+    pn = d.getVar('PN')
 
-    bbfilespecs = (confdata.getVar('BBFILES', True) or '').split()
+    bbfilespecs = (confdata.getVar('BBFILES') or '').split()
     if destdir == destlayerdir:
         for bbfilespec in bbfilespecs:
             if not bbfilespec.endswith('.bbappend'):
@@ -495,8 +474,8 @@
 
     # Try to make up a path that matches BBFILES
     # this is a little crude, but better than nothing
-    bpn = d.getVar('BPN', True)
-    recipefn = os.path.basename(d.getVar('FILE', True))
+    bpn = d.getVar('BPN')
+    recipefn = os.path.basename(d.getVar('FILE'))
     pathoptions = [destdir]
     if extrapathhint:
         pathoptions.append(os.path.join(destdir, extrapathhint))
@@ -520,7 +499,7 @@
     import bb.cookerdata
 
     destlayerdir = os.path.abspath(destlayerdir)
-    recipefile = d.getVar('FILE', True)
+    recipefile = d.getVar('FILE')
     recipefn = os.path.splitext(os.path.basename(recipefile))[0]
     if wildcardver and '_' in recipefn:
         recipefn = recipefn.split('_', 1)[0] + '_%'
@@ -540,7 +519,7 @@
     appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
     closepath = ''
     pathok = True
-    for bbfilespec in confdata.getVar('BBFILES', True).split():
+    for bbfilespec in confdata.getVar('BBFILES').split():
         if fnmatch.fnmatchcase(appendpath, bbfilespec):
             # Our append path works, we're done
             break
@@ -613,7 +592,7 @@
 
     # FIXME check if the bbappend doesn't get overridden by a higher priority layer?
 
-    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
     if not os.path.abspath(destlayerdir) in layerdirs:
         bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
 
@@ -649,7 +628,7 @@
         else:
             bbappendlines.append((varname, op, value))
 
-    destsubdir = rd.getVar('PN', True)
+    destsubdir = rd.getVar('PN')
     if srcfiles:
         bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
 
@@ -668,7 +647,7 @@
                 srcurientry = 'file://%s' % srcfile
                 # Double-check it's not there already
                 # FIXME do we care if the entry is added by another bbappend that might go away?
-                if not srcurientry in rd.getVar('SRC_URI', True).split():
+                if not srcurientry in rd.getVar('SRC_URI').split():
                     if machine:
                         appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
                     else:
@@ -786,7 +765,11 @@
         for newfile, srcfile in copyfiles.items():
             filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
             if os.path.abspath(newfile) != os.path.abspath(filedest):
-                bb.note('Copying %s to %s' % (newfile, filedest))
+                if newfile.startswith(tempfile.gettempdir()):
+                    newfiledisp = os.path.basename(newfile)
+                else:
+                    newfiledisp = newfile
+                bb.note('Copying %s to %s' % (newfiledisp, filedest))
                 bb.utils.mkdirhier(os.path.dirname(filedest))
                 shutil.copyfile(newfile, filedest)
 
@@ -813,7 +796,7 @@
     # Sort by length so we get the variables we're interested in first
     for var in sorted(list(d.keys()), key=len):
         if var.endswith('dir') and var.lower() == var:
-            value = d.getVar(var, True)
+            value = d.getVar(var)
             if value.startswith('/') and not '\n' in value and value not in dirvars:
                 dirvars[value] = var
     for dirpath in sorted(list(dirvars.keys()), reverse=True):
@@ -867,12 +850,12 @@
     ru['type'] = 'U'
     ru['datetime'] = ''
 
-    pv = rd.getVar('PV', True)
+    pv = rd.getVar('PV')
 
     # XXX: If don't have SRC_URI means that don't have upstream sources so
     # returns the current recipe version, so that upstream version check
     # declares a match.
-    src_uris = rd.getVar('SRC_URI', True)
+    src_uris = rd.getVar('SRC_URI')
     if not src_uris:
         ru['version'] = pv
         ru['type'] = 'M'
@@ -883,13 +866,13 @@
     src_uri = src_uris.split()[0]
     uri_type, _, _, _, _, _ =  decodeurl(src_uri)
 
-    manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION", True)
+    manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION")
     if manual_upstream_version:
         # manual tracking of upstream version.
         ru['version'] = manual_upstream_version
         ru['type'] = 'M'
 
-        manual_upstream_date = rd.getVar("CHECK_DATE", True)
+        manual_upstream_date = rd.getVar("CHECK_DATE")
         if manual_upstream_date:
             date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
         else:
diff --git a/import-layers/yocto-poky/meta/lib/oe/rootfs.py b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
index f967883..96591f3 100644
--- a/import-layers/yocto-poky/meta/lib/oe/rootfs.py
+++ b/import-layers/yocto-poky/meta/lib/oe/rootfs.py
@@ -15,12 +15,13 @@
     This is an abstract class. Do not instantiate this directly.
     """
 
-    def __init__(self, d, progress_reporter=None):
+    def __init__(self, d, progress_reporter=None, logcatcher=None):
         self.d = d
         self.pm = None
-        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
-        self.deploydir = self.d.getVar('IMGDEPLOYDIR', True)
+        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS')
+        self.deploydir = self.d.getVar('IMGDEPLOYDIR')
         self.progress_reporter = progress_reporter
+        self.logcatcher = logcatcher
 
         self.install_order = Manifest.INSTALL_ORDER
 
@@ -53,6 +54,8 @@
         messages = []
         with open(log_path, 'r') as log:
             for line in log:
+                if self.logcatcher and self.logcatcher.contains(line.rstrip()):
+                    continue
                 for ee in excludes:
                     m = ee.search(line)
                     if m:
@@ -69,7 +72,7 @@
             else:
                 msg = '%d %s messages' % (len(messages), type)
             msg = '[log_check] %s: found %s in the logfile:\n%s' % \
-                (self.d.getVar('PN', True), msg, ''.join(messages))
+                (self.d.getVar('PN'), msg, ''.join(messages))
             if type == 'error':
                 bb.fatal(msg)
             else:
@@ -84,7 +87,10 @@
     def _insert_feed_uris(self):
         if bb.utils.contains("IMAGE_FEATURES", "package-management",
                          True, False, self.d):
-            self.pm.insert_feeds_uris()
+            self.pm.insert_feeds_uris(self.d.getVar('PACKAGE_FEED_URIS') or "",
+                self.d.getVar('PACKAGE_FEED_BASE_PATHS') or "",
+                self.d.getVar('PACKAGE_FEED_ARCHS'))
+
 
     @abstractmethod
     def _handle_intercept_failure(self, failed_script):
@@ -100,7 +106,7 @@
         pass
 
     def _setup_dbg_rootfs(self, dirs):
-        gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS', True) or '0'
+        gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS') or '0'
         if gen_debugfs != '1':
            return
 
@@ -153,7 +159,7 @@
         os.rename(self.image_rootfs + '-orig', self.image_rootfs)
 
     def _exec_shell_cmd(self, cmd):
-        fakerootcmd = self.d.getVar('FAKEROOT', True)
+        fakerootcmd = self.d.getVar('FAKEROOT')
         if fakerootcmd is not None:
             exec_cmd = [fakerootcmd, cmd]
         else:
@@ -168,14 +174,14 @@
 
     def create(self):
         bb.note("###### Generate rootfs #######")
-        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
-        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
-        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
+        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND")
+        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND")
+        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND')
 
-        postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR", True)
+        postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR")
         if not postinst_intercepts_dir:
             postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
-        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
+        intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
                                       "intercept_scripts")
 
         bb.utils.remove(intercepts_dir, True)
@@ -194,10 +200,10 @@
         # call the package manager dependent create method
         self._create()
 
-        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
+        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir')
         bb.utils.mkdirhier(sysconfdir)
         with open(sysconfdir + "/version", "w+") as ver:
-            ver.write(self.d.getVar('BUILDNAME', True) + "\n")
+            ver.write(self.d.getVar('BUILDNAME') + "\n")
 
         execute_pre_post_process(self.d, rootfs_post_install_cmds)
 
@@ -216,7 +222,7 @@
                          "offline and rootfs is read-only: %s" %
                          delayed_postinsts)
 
-        if self.d.getVar('USE_DEVFS', True) != "1":
+        if self.d.getVar('USE_DEVFS') != "1":
             self._create_devfs()
 
         self._uninstall_unneeded()
@@ -228,7 +234,7 @@
 
         self._run_ldconfig()
 
-        if self.d.getVar('USE_DEPMOD', True) != "0":
+        if self.d.getVar('USE_DEPMOD') != "0":
             self._generate_kernel_module_deps()
 
         self._cleanup()
@@ -244,18 +250,23 @@
         if delayed_postinsts is None:
             if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
                 self._exec_shell_cmd(["update-rc.d", "-f", "-r",
-                                      self.d.getVar('IMAGE_ROOTFS', True),
+                                      self.d.getVar('IMAGE_ROOTFS'),
                                       "run-postinsts", "remove"])
 
         image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
                                         True, False, self.d)
-        image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE', True)
+        image_rorfs_force = self.d.getVar('FORCE_RO_REMOVE')
 
         if image_rorfs or image_rorfs_force == "1":
             # Remove components that we don't need if it's a read-only rootfs
-            unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED", True).split()
+            unneeded_pkgs = self.d.getVar("ROOTFS_RO_UNNEEDED").split()
             pkgs_installed = image_list_installed_packages(self.d)
-            pkgs_to_remove = [pkg for pkg in pkgs_installed if pkg in unneeded_pkgs]
+            # Make sure update-alternatives is last on the command line, so
+            # that it is removed last. That way its database is still
+            # available while the other packages are uninstalled, allowing
+            # the alternative symlinks of those packages to be managed correctly.
+            provider = self.d.getVar("VIRTUAL-RUNTIME_update-alternatives")
+            pkgs_to_remove = sorted([pkg for pkg in pkgs_installed if pkg in unneeded_pkgs], key=lambda x: x == provider)
 
             if len(pkgs_to_remove) > 0:
                 self.pm.remove(pkgs_to_remove, False)
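A minimal sketch of the ordering trick above (the package names are made
up): sorted() is stable and a boolean key sorts False (0) before True (1),
so the update-alternatives provider always lands at the end of the
removal list:

    pkgs_installed = ['shadow', 'update-alternatives-opkg', 'base-passwd']
    provider = 'update-alternatives-opkg'
    ordered = sorted(pkgs_installed, key=lambda x: x == provider)
    print(ordered)  # ['shadow', 'base-passwd', 'update-alternatives-opkg']
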
@@ -266,7 +277,7 @@
                 bb.warn("There are post install scripts "
                         "in a read-only rootfs")
 
-        post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND", True)
+        post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND")
         execute_pre_post_process(self.d, post_uninstall_cmds)
 
         runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
@@ -276,12 +287,12 @@
             self.pm.remove_packaging_data()
 
     def _run_intercepts(self):
-        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
+        intercepts_dir = os.path.join(self.d.getVar('WORKDIR'),
                                       "intercept_scripts")
 
         bb.note("Running intercept scripts:")
         os.environ['D'] = self.image_rootfs
-        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
         for script in os.listdir(intercepts_dir):
             script_full = os.path.join(intercepts_dir, script)
 
@@ -291,10 +302,10 @@
             bb.note("> Executing %s intercept ..." % script)
 
             try:
-                subprocess.check_call(script_full)
+                subprocess.check_output(script_full)
             except subprocess.CalledProcessError as e:
-                bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
-                        (script, e.returncode))
+                bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details! (Output: %s)" %
+                        (script, e.returncode, e.output))
 
                 with open(script_full) as intercept:
                     registered_pkgs = None
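A small sketch of the failure path above: unlike check_call(),
subprocess.check_output() attaches the child's stdout to the
CalledProcessError, which is what lets the warning quote the hook's
output ('false' simply stands in for a failing intercept script):

    import subprocess

    try:
        subprocess.check_output(['false'])
    except subprocess.CalledProcessError as e:
        print("hook failed (exit code: %d), output: %r" % (e.returncode, e.output))
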
@@ -313,7 +324,7 @@
                         self._handle_intercept_failure(registered_pkgs)
 
     def _run_ldconfig(self):
-        if self.d.getVar('LDCONFIGDEPEND', True):
+        if self.d.getVar('LDCONFIGDEPEND'):
             bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v")
             self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
                                   'new', '-v'])
@@ -333,7 +344,7 @@
             bb.note("No Kernel Modules found, not running depmod")
             return
 
-        kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR', True), "kernel-depmod",
+        kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR'), "kernel-depmod",
                                            'kernel-abiversion')
         if not os.path.exists(kernel_abi_ver_file):
             bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
@@ -355,15 +366,15 @@
     """
     def _create_devfs(self):
         devtable_list = []
-        devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
+        devtable = self.d.getVar('IMAGE_DEVICE_TABLE')
         if devtable is not None:
             devtable_list.append(devtable)
         else:
-            devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
+            devtables = self.d.getVar('IMAGE_DEVICE_TABLES')
             if devtables is None:
                 devtables = 'files/device_table-minimal.txt'
             for devtable in devtables.split():
-                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
+                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH'), devtable))
 
         for devtable in devtable_list:
             self._exec_shell_cmd(["makedevs", "-r",
@@ -371,24 +382,24 @@
 
 
 class RpmRootfs(Rootfs):
-    def __init__(self, d, manifest_dir, progress_reporter=None):
-        super(RpmRootfs, self).__init__(d, progress_reporter)
+    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+        super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher)
         self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
                                '|exit 1|ERROR: |Error: |Error |ERROR '\
                                '|Failed |Failed: |Failed$|Failed\(\d+\):)'
         self.manifest = RpmManifest(d, manifest_dir)
 
         self.pm = RpmPM(d,
-                        d.getVar('IMAGE_ROOTFS', True),
-                        self.d.getVar('TARGET_VENDOR', True)
+                        d.getVar('IMAGE_ROOTFS'),
+                        self.d.getVar('TARGET_VENDOR')
                         )
 
-        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
+        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN')
         if self.inc_rpm_image_gen != "1":
             bb.utils.remove(self.image_rootfs, True)
         else:
             self.pm.recovery_packaging_data()
-        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
 
         self.pm.create_configs()
 
@@ -420,10 +431,12 @@
                 bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
                 self.pm.remove(pkg_to_remove)
 
+            self.pm.autoremove()
+
     def _create(self):
         pkgs_to_install = self.manifest.parse_initial_manifest()
-        rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS', True)
-        rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS', True)
+        rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS')
+        rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS')
 
         # update PM index files
         self.pm.write_index()
@@ -433,8 +446,6 @@
         if self.progress_reporter:
             self.progress_reporter.next_stage()
 
-        self.pm.dump_all_available_pkgs()
-
         if self.inc_rpm_image_gen == "1":
             self._create_incremental(pkgs_to_install)
 
@@ -469,15 +480,13 @@
         if self.progress_reporter:
             self.progress_reporter.next_stage()
 
-        self._setup_dbg_rootfs(['/etc/rpm', '/var/lib/rpm', '/var/lib/smart'])
+        self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf'])
 
         execute_pre_post_process(self.d, rpm_post_process_cmds)
 
         if self.inc_rpm_image_gen == "1":
             self.pm.backup_packaging_data()
 
-        self.pm.rpm_setup_smart_target_config()
-
         if self.progress_reporter:
             self.progress_reporter.next_stage()
 
@@ -515,19 +524,11 @@
             self.pm.save_rpmpostinst(pkg)
 
     def _cleanup(self):
-        # during the execution of postprocess commands, rpm is called several
-        # times to get the files installed, dependencies, etc. This creates the
-        # __db.00* (Berkeley DB files that hold locks, rpm specific environment
-        # settings, etc.), that should not get into the final rootfs
-        self.pm.unlock_rpm_db()
-        if os.path.isdir(self.pm.install_dir_path + "/tmp") and not os.listdir(self.pm.install_dir_path + "/tmp"):
-           bb.utils.remove(self.pm.install_dir_path + "/tmp", True)
-        if os.path.isdir(self.pm.install_dir_path) and not os.listdir(self.pm.install_dir_path):
-           bb.utils.remove(self.pm.install_dir_path, True)
+        pass
 
 class DpkgOpkgRootfs(Rootfs):
-    def __init__(self, d, progress_reporter=None):
-        super(DpkgOpkgRootfs, self).__init__(d, progress_reporter)
+    def __init__(self, d, progress_reporter=None, logcatcher=None):
+        super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
 
     def _get_pkgs_postinsts(self, status_file):
         def _get_pkg_depends_list(pkg_depends):
@@ -594,7 +595,7 @@
         pkg_list = []
 
         pkgs = None
-        if not self.d.getVar('PACKAGE_INSTALL', True).strip():
+        if not self.d.getVar('PACKAGE_INSTALL').strip():
             bb.note("Building empty image")
         else:
             pkgs = self._get_pkgs_postinsts(status_file)
@@ -621,8 +622,8 @@
             num += 1
 
 class DpkgRootfs(DpkgOpkgRootfs):
-    def __init__(self, d, manifest_dir, progress_reporter=None):
-        super(DpkgRootfs, self).__init__(d, progress_reporter)
+    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+        super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
         self.log_check_regex = '^E:'
         self.log_check_expected_regexes = \
         [
@@ -630,17 +631,17 @@
         ]
 
         bb.utils.remove(self.image_rootfs, True)
-        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
         self.manifest = DpkgManifest(d, manifest_dir)
-        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
-                         d.getVar('PACKAGE_ARCHS', True),
-                         d.getVar('DPKG_ARCH', True))
+        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'),
+                         d.getVar('PACKAGE_ARCHS'),
+                         d.getVar('DPKG_ARCH'))
 
 
     def _create(self):
         pkgs_to_install = self.manifest.parse_initial_manifest()
-        deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS', True)
-        deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS', True)
+        deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS')
+        deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS')
 
         alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
         bb.utils.mkdirhier(alt_dir)
@@ -713,15 +714,15 @@
 
 
 class OpkgRootfs(DpkgOpkgRootfs):
-    def __init__(self, d, manifest_dir, progress_reporter=None):
-        super(OpkgRootfs, self).__init__(d, progress_reporter)
+    def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None):
+        super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher)
         self.log_check_regex = '(exit 1|Collected errors)'
 
         self.manifest = OpkgManifest(d, manifest_dir)
-        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
-        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
+        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET")
+        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS")
 
-        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
+        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or ""
         if self._remove_old_rootfs():
             bb.utils.remove(self.image_rootfs, True)
             self.pm = OpkgPM(d,
@@ -735,7 +736,7 @@
                              self.pkg_archs)
             self.pm.recover_packaging_data()
 
-        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True)
 
     def _prelink_file(self, root_dir, filename):
         bb.note('prelink %s in %s' % (filename, root_dir))
@@ -790,7 +791,7 @@
     """
     def _multilib_sanity_test(self, dirs):
 
-        allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True)
+        allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP")
         if allow_replace is None:
             allow_replace = ""
 
@@ -822,12 +823,12 @@
                         files[key] = item
 
     def _multilib_test_install(self, pkgs):
-        ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
+        ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS")
         bb.utils.mkdirhier(ml_temp)
 
         dirs = [self.image_rootfs]
 
-        for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
+        for variant in self.d.getVar("MULTILIB_VARIANTS").split():
             ml_target_rootfs = os.path.join(ml_temp, variant)
 
             bb.utils.remove(ml_target_rootfs, True)
@@ -887,9 +888,9 @@
             old_vars_list = open(vars_list_file, 'r+').read()
 
         new_vars_list = '%s:%s:%s\n' % \
-                ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
-                 (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
-                 (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
+                ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(),
+                 (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(),
+                 (self.d.getVar('PACKAGE_EXCLUDE') or '').strip())
         open(vars_list_file, 'w+').write(new_vars_list)
 
         if old_vars_list != new_vars_list:
@@ -899,11 +900,11 @@
 
     def _create(self):
         pkgs_to_install = self.manifest.parse_initial_manifest()
-        opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
-        opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
+        opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS')
+        opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS')
 
         # update PM index files, unless users provide their own feeds
-        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
             self.pm.write_index()
 
         execute_pre_post_process(self.d, opkg_pre_process_cmds)
@@ -945,9 +946,9 @@
         if self.progress_reporter:
             self.progress_reporter.next_stage()
 
-        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
+        opkg_lib_dir = self.d.getVar('OPKGLIBDIR')
         opkg_dir = os.path.join(opkg_lib_dir, 'opkg')
-        self._setup_dbg_rootfs(['/etc', opkg_dir, '/usr/lib/ssl'])
+        self._setup_dbg_rootfs([opkg_dir])
 
         execute_pre_post_process(self.d, opkg_post_process_cmds)
 
@@ -963,7 +964,7 @@
 
     def _get_delayed_postinsts(self):
         status_file = os.path.join(self.image_rootfs,
-                                   self.d.getVar('OPKGLIBDIR', True).strip('/'),
+                                   self.d.getVar('OPKGLIBDIR').strip('/'),
                                    "opkg", "status")
         return self._get_delayed_postinsts_common(status_file)
 
@@ -988,20 +989,20 @@
             "deb": DpkgRootfs}[imgtype]
 
 def variable_depends(d, manifest_dir=None):
-    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    img_type = d.getVar('IMAGE_PKGTYPE')
     cls = get_class_for_type(img_type)
     return cls._depends_list()
 
-def create_rootfs(d, manifest_dir=None, progress_reporter=None):
+def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None):
     env_bkp = os.environ.copy()
 
-    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    img_type = d.getVar('IMAGE_PKGTYPE')
     if img_type == "rpm":
-        RpmRootfs(d, manifest_dir, progress_reporter).create()
+        RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
     elif img_type == "ipk":
-        OpkgRootfs(d, manifest_dir, progress_reporter).create()
+        OpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
     elif img_type == "deb":
-        DpkgRootfs(d, manifest_dir, progress_reporter).create()
+        DpkgRootfs(d, manifest_dir, progress_reporter, logcatcher).create()
 
     os.environ.clear()
     os.environ.update(env_bkp)
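A sketch of the environment bracketing in create_rootfs(): the backends
export variables into os.environ (for example 'D' in _run_intercepts()
above), so the caller snapshots the environment and restores it whole:

    import os

    env_bkp = os.environ.copy()
    try:
        os.environ['D'] = '/tmp/rootfs'  # hypothetical mutation by a backend
    finally:
        os.environ.clear()
        os.environ.update(env_bkp)
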
@@ -1009,13 +1010,13 @@
 
 def image_list_installed_packages(d, rootfs_dir=None):
     if not rootfs_dir:
-        rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
+        rootfs_dir = d.getVar('IMAGE_ROOTFS')
 
-    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    img_type = d.getVar('IMAGE_PKGTYPE')
     if img_type == "rpm":
         return RpmPkgsList(d, rootfs_dir).list_pkgs()
     elif img_type == "ipk":
-        return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list_pkgs()
+        return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET")).list_pkgs()
     elif img_type == "deb":
         return DpkgPkgsList(d, rootfs_dir).list_pkgs()
 
diff --git a/import-layers/yocto-poky/meta/lib/oe/sdk.py b/import-layers/yocto-poky/meta/lib/oe/sdk.py
index c74525f..9fe1687 100644
--- a/import-layers/yocto-poky/meta/lib/oe/sdk.py
+++ b/import-layers/yocto-poky/meta/lib/oe/sdk.py
@@ -11,16 +11,16 @@
 class Sdk(object, metaclass=ABCMeta):
     def __init__(self, d, manifest_dir):
         self.d = d
-        self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
-        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
-        self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
-        self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
+        self.sdk_output = self.d.getVar('SDK_OUTPUT')
+        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE').strip('/')
+        self.target_path = self.d.getVar('SDKTARGETSYSROOT').strip('/')
+        self.sysconfdir = self.d.getVar('sysconfdir').strip('/')
 
         self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
         self.sdk_host_sysroot = self.sdk_output
 
         if manifest_dir is None:
-            self.manifest_dir = self.d.getVar("SDK_DIR", True)
+            self.manifest_dir = self.d.getVar("SDK_DIR")
         else:
             self.manifest_dir = manifest_dir
 
@@ -40,12 +40,12 @@
 
         # Don't ship any libGL in the SDK
         self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
-                         self.d.getVar('libdir_nativesdk', True).strip('/'),
+                         self.d.getVar('libdir_nativesdk').strip('/'),
                          "libGL*"))
 
         # Fix or remove broken .la files
         self.remove(os.path.join(self.sdk_output, self.sdk_native_path,
-                         self.d.getVar('libdir_nativesdk', True).strip('/'),
+                         self.d.getVar('libdir_nativesdk').strip('/'),
                          "*.la"))
 
         # Link the ld.so.cache file into the hosts filesystem
@@ -54,7 +54,7 @@
         self.mkdirhier(os.path.dirname(link_name))
         os.symlink("/etc/ld.so.cache", link_name)
 
-        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
+        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND'))
 
     def movefile(self, sourcefile, destdir):
         try:
@@ -85,7 +85,7 @@
             bb.warn("cannot remove SDK dir: %s" % path)
 
 class RpmSdk(Sdk):
-    def __init__(self, d, manifest_dir=None):
+    def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"):
         super(RpmSdk, self).__init__(d, manifest_dir)
 
         self.target_manifest = RpmManifest(d, self.manifest_dir,
@@ -100,11 +100,17 @@
                               'pkgconfig'
                               ]
 
+        rpm_repo_workdir = "oe-sdk-repo"
+        if "sdk_ext" in d.getVar("BB_RUNTASK"):
+            rpm_repo_workdir = "oe-sdk-ext-repo"
+
+
         self.target_pm = RpmPM(d,
                                self.sdk_target_sysroot,
-                               self.d.getVar('TARGET_VENDOR', True),
+                               self.d.getVar('TARGET_VENDOR'),
                                'target',
-                               target_providename
+                               target_providename,
+                               rpm_repo_workdir=rpm_repo_workdir
                                )
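A minimal sketch of the workdir selection above: BB_RUNTASK names the
task currently executing, so extensible-SDK tasks (names containing
"sdk_ext") get their own RPM repo directory:

    def repo_workdir(runtask):
        return "oe-sdk-ext-repo" if "sdk_ext" in runtask else "oe-sdk-repo"

    assert repo_workdir("do_populate_sdk_ext") == "oe-sdk-ext-repo"
    assert repo_workdir("do_populate_sdk") == "oe-sdk-repo"
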
 
         sdk_providename = ['/bin/sh',
@@ -118,11 +124,12 @@
 
         self.host_pm = RpmPM(d,
                              self.sdk_host_sysroot,
-                             self.d.getVar('SDK_VENDOR', True),
+                             self.d.getVar('SDK_VENDOR'),
                              'host',
                              sdk_providename,
                              "SDK_PACKAGE_ARCHS",
-                             "SDK_OS"
+                             "SDK_OS",
+                             rpm_repo_workdir=rpm_repo_workdir
                              )
 
     def _populate_sysroot(self, pm, manifest):
@@ -130,7 +137,6 @@
 
         pm.create_configs()
         pm.write_index()
-        pm.dump_all_available_pkgs()
         pm.update()
 
         pkgs = []
@@ -149,9 +155,9 @@
         bb.note("Installing TARGET packages")
         self._populate_sysroot(self.target_pm, self.target_manifest)
 
-        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
-        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
             self.target_pm.remove_packaging_data()
@@ -159,7 +165,7 @@
         bb.note("Installing NATIVESDK packages")
         self._populate_sysroot(self.host_pm, self.host_manifest)
 
-        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
             self.host_pm.remove_packaging_data()
@@ -167,7 +173,7 @@
         # Move host RPM library data
         native_rpm_state_dir = os.path.join(self.sdk_output,
                                             self.sdk_native_path,
-                                            self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+                                            self.d.getVar('localstatedir_nativesdk').strip('/'),
                                             "lib",
                                             "rpm"
                                             )
@@ -188,7 +194,9 @@
                                                         True).strip('/'),
                                           )
         self.mkdirhier(native_sysconf_dir)
-        for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
+        for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")):
+            self.movefile(f, native_sysconf_dir)
+        for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")):
             self.movefile(f, native_sysconf_dir)
         self.remove(os.path.join(self.sdk_output, "etc"), True)
 
@@ -197,8 +205,8 @@
     def __init__(self, d, manifest_dir=None):
         super(OpkgSdk, self).__init__(d, manifest_dir)
 
-        self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
-        self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
+        self.target_conf = self.d.getVar("IPKGCONF_TARGET")
+        self.host_conf = self.d.getVar("IPKGCONF_SDK")
 
         self.target_manifest = OpkgManifest(d, self.manifest_dir,
                                             Manifest.MANIFEST_TYPE_SDK_TARGET)
@@ -206,15 +214,15 @@
                                           Manifest.MANIFEST_TYPE_SDK_HOST)
 
         self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
-                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"))
 
         self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
-                              self.d.getVar("SDK_PACKAGE_ARCHS", True))
+                              self.d.getVar("SDK_PACKAGE_ARCHS"))
 
     def _populate_sysroot(self, pm, manifest):
         pkgs_to_install = manifest.parse_initial_manifest()
 
-        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1":
             pm.write_index()
 
         pm.update()
@@ -228,9 +236,9 @@
         bb.note("Installing TARGET packages")
         self._populate_sysroot(self.target_pm, self.target_manifest)
 
-        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
-        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
             self.target_pm.remove_packaging_data()
@@ -238,7 +246,7 @@
         bb.note("Installing NATIVESDK packages")
         self._populate_sysroot(self.host_pm, self.host_manifest)
 
-        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
         if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d):
             self.host_pm.remove_packaging_data()
@@ -257,7 +265,7 @@
                               os.path.basename(self.host_conf)), 0o644)
 
         native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
-                                             self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+                                             self.d.getVar('localstatedir_nativesdk').strip('/'),
                                              "lib", "opkg")
         self.mkdirhier(native_opkg_state_dir)
         for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
@@ -270,8 +278,8 @@
     def __init__(self, d, manifest_dir=None):
         super(DpkgSdk, self).__init__(d, manifest_dir)
 
-        self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
-        self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
+        self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt")
+        self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk")
 
         self.target_manifest = DpkgManifest(d, self.manifest_dir,
                                             Manifest.MANIFEST_TYPE_SDK_TARGET)
@@ -279,17 +287,17 @@
                                           Manifest.MANIFEST_TYPE_SDK_HOST)
 
         self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
-                                self.d.getVar("PACKAGE_ARCHS", True),
-                                self.d.getVar("DPKG_ARCH", True),
+                                self.d.getVar("PACKAGE_ARCHS"),
+                                self.d.getVar("DPKG_ARCH"),
                                 self.target_conf_dir)
 
         self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
-                              self.d.getVar("SDK_PACKAGE_ARCHS", True),
-                              self.d.getVar("DEB_SDK_ARCH", True),
+                              self.d.getVar("SDK_PACKAGE_ARCHS"),
+                              self.d.getVar("DEB_SDK_ARCH"),
                               self.host_conf_dir)
 
     def _copy_apt_dir_to(self, dst_dir):
-        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
+        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE")
 
         self.remove(dst_dir, True)
 
@@ -310,9 +318,9 @@
         bb.note("Installing TARGET packages")
         self._populate_sysroot(self.target_pm, self.target_manifest)
 
-        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY'))
 
-        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND"))
 
         self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
 
@@ -322,7 +330,7 @@
         bb.note("Installing NATIVESDK packages")
         self._populate_sysroot(self.host_pm, self.host_manifest)
 
-        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND"))
 
         self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
                                            "etc", "apt"))
@@ -341,26 +349,26 @@
 
 def sdk_list_installed_packages(d, target, rootfs_dir=None):
     if rootfs_dir is None:
-        sdk_output = d.getVar('SDK_OUTPUT', True)
-        target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
+        sdk_output = d.getVar('SDK_OUTPUT')
+        target_path = d.getVar('SDKTARGETSYSROOT').strip('/')
 
         rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
 
-    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    img_type = d.getVar('IMAGE_PKGTYPE')
     if img_type == "rpm":
         arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
         os_var = ["SDK_OS", None][target is True]
-        return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list_pkgs()
+        return RpmPkgsList(d, rootfs_dir).list_pkgs()
     elif img_type == "ipk":
         conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
-        return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list_pkgs()
+        return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var)).list_pkgs()
     elif img_type == "deb":
         return DpkgPkgsList(d, rootfs_dir).list_pkgs()
 
 def populate_sdk(d, manifest_dir=None):
     env_bkp = os.environ.copy()
 
-    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    img_type = d.getVar('IMAGE_PKGTYPE')
     if img_type == "rpm":
         RpmSdk(d, manifest_dir).populate()
     elif img_type == "ipk":
diff --git a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
index 8224e3a..b8dd4c8 100644
--- a/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
+++ b/import-layers/yocto-poky/meta/lib/oe/sstatesig.py
@@ -20,8 +20,12 @@
     def isImage(fn):
         return "/image.bbclass" in " ".join(dataCache.inherits[fn])
 
-    # Always include our own inter-task dependencies
+    # (Almost) always include our own inter-task dependencies.
+    # The exception is the special do_kernel_configme->do_unpack_and_patch
+    # dependency from archiver.bbclass.
     if recipename == depname:
+        if task == "do_kernel_configme" and dep.endswith(".do_unpack_and_patch"):
+            return False
         return True
 
     # Quilt (patch application) changing isn't likely to affect anything
@@ -63,10 +67,10 @@
 
 def sstate_lockedsigs(d):
     sigs = {}
-    types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split()
+    types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES") or "").split()
     for t in types:
         siggen_lockedsigs_var = "SIGGEN_LOCKEDSIGS_%s" % t
-        lockedsigs = (d.getVar(siggen_lockedsigs_var, True) or "").split()
+        lockedsigs = (d.getVar(siggen_lockedsigs_var) or "").split()
         for ls in lockedsigs:
             pn, task, h = ls.split(":", 2)
             if pn not in sigs:
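A minimal sketch of the locked-signature entry format parsed above,
"pn:task:hash", split on the first two colons (the values are made up):

    ls = "zlib:do_configure:0123456789abcdef"
    pn, task, h = ls.split(":", 2)
    assert (pn, task, h) == ("zlib", "do_configure", "0123456789abcdef")
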
@@ -77,8 +81,8 @@
 class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
     name = "OEBasic"
     def init_rundepcheck(self, data):
-        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
-        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
         pass
     def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
         return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
@@ -86,15 +90,15 @@
 class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
     name = "OEBasicHash"
     def init_rundepcheck(self, data):
-        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
-        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE") or "").split()
+        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS") or "").split()
         self.lockedsigs = sstate_lockedsigs(data)
         self.lockedhashes = {}
         self.lockedpnmap = {}
         self.lockedhashfn = {}
-        self.machine = data.getVar("MACHINE", True)
+        self.machine = data.getVar("MACHINE")
         self.mismatch_msgs = []
-        self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES", True) or
+        self.unlockedrecipes = (data.getVar("SIGGEN_UNLOCKED_RECIPES") or
                                 "").split()
         self.unlockedrecipes = { k: "" for k in self.unlockedrecipes }
         pass
@@ -197,7 +201,8 @@
             types[t].append(k)
 
         with open(sigfile, "w") as f:
-            for t in types:
+            l = sorted(types)
+            for t in l:
                 f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
                 types[t].sort()
                 sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
@@ -208,7 +213,17 @@
                         continue
                     f.write("    " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
                 f.write('    "\n')
-            f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(list(types.keys()))))
+            f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(l)))
+
+    def dump_siglist(self, sigfile):
+        with open(sigfile, "w") as f:
+            tasks = []
+            for taskitem in self.taskhash:
+                (fn, task) = taskitem.rsplit(".", 1)
+                pn = self.lockedpnmap[fn]
+                tasks.append((pn, task, fn, self.taskhash[taskitem]))
+            for (pn, task, fn, taskhash) in sorted(tasks):
+                f.write('%s.%s %s %s\n' % (pn, task, fn, taskhash))
 
     def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
         warn_msgs = []
@@ -224,13 +239,13 @@
                         sstate_missing_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
                                                % (pn, sq_task[task], sq_hash[task]))
 
-        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK", True)
+        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_TASKSIG_CHECK")
         if checklevel == 'warn':
             warn_msgs += self.mismatch_msgs
         elif checklevel == 'error':
             error_msgs += self.mismatch_msgs
 
-        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK", True)
+        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK")
         if checklevel == 'warn':
             warn_msgs += sstate_missing_msgs
         elif checklevel == 'error':
@@ -253,9 +268,6 @@
     import fnmatch
     import glob
 
-    if taskhashlist:
-        hashfiles = {}
-
     if not taskname:
         # We have to derive pn and taskname
         key = pn
@@ -265,8 +277,15 @@
         if key.startswith('virtual:native:'):
             pn = pn + '-native'
 
+    hashfiles = {}
     filedates = {}
 
+    def get_hashval(siginfo):
+        if siginfo.endswith('.siginfo'):
+            return siginfo.rpartition(':')[2].partition('_')[0]
+        else:
+            return siginfo.rpartition('.')[2]
+
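A sketch of the two filename shapes get_hashval() distinguishes (the
hash and package fields are made up): sstate siginfo names carry
"<hash>_<task>" after the last ':', while stamp-derived files simply end
in ".<hash>":

    def get_hashval(siginfo):
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        return siginfo.rpartition('.')[2]

    print(get_hashval('sstate:zlib:core2-64:1.2.8:r0:core2-64:3:0123abcd_populate_sysroot.tgz.siginfo'))  # 0123abcd
    print(get_hashval('zlib.do_configure.sigdata.0123abcd'))  # 0123abcd
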
     # First search in stamps dir
     localdata = d.createCopy()
     localdata.setVar('MULTIMACH_TARGET_SYS', '*')
@@ -274,7 +293,7 @@
     localdata.setVar('PV', '*')
     localdata.setVar('PR', '*')
     localdata.setVar('EXTENDPE', '')
-    stamp = localdata.getVar('STAMP', True)
+    stamp = localdata.getVar('STAMP')
     if pn.startswith("gcc-source"):
         # gcc-source shared workdir is a special case :(
         stamp = localdata.expand("${STAMPS_DIR}/work-shared/gcc-${PV}-${PR}")
@@ -296,6 +315,8 @@
                 filedates[fullpath] = os.stat(fullpath).st_mtime
             except OSError:
                 continue
+            hashval = get_hashval(fullpath)
+            hashfiles[hashval] = fullpath
 
     if not taskhashlist or (len(filedates) < 2 and not foundall):
         # That didn't work, look in sstate-cache
@@ -309,30 +330,25 @@
             localdata.setVar('PV', '*')
             localdata.setVar('PR', '*')
             localdata.setVar('BB_TASKHASH', hashval)
-            swspec = localdata.getVar('SSTATE_SWSPEC', True)
+            swspec = localdata.getVar('SSTATE_SWSPEC')
             if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
                 localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
             elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
                 localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
             sstatename = taskname[3:]
-            filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
+            filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG'), sstatename)
 
-            if hashval != '*':
-                sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
-            else:
-                sstatedir = d.getVar('SSTATE_DIR', True)
-
-            for root, dirs, files in os.walk(sstatedir):
-                for fn in files:
-                    fullpath = os.path.join(root, fn)
-                    if fnmatch.fnmatch(fullpath, filespec):
-                        if taskhashlist:
-                            hashfiles[hashval] = fullpath
-                        else:
-                            try:
-                                filedates[fullpath] = os.stat(fullpath).st_mtime
-                            except:
-                                continue
+            matchedfiles = glob.glob(filespec)
+            for fullpath in matchedfiles:
+                actual_hashval = get_hashval(fullpath)
+                if actual_hashval in hashfiles:
+                    continue
+                hashfiles[hashval] = fullpath
+                if not taskhashlist:
+                    try:
+                        filedates[fullpath] = os.stat(fullpath).st_mtime
+                    except:
+                        continue
 
     if taskhashlist:
         return hashfiles
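A sketch of the glob-based lookup above (the pattern is illustrative,
not the exact SSTATE_PKG layout): wildcarding PV/PR in the expanded
filespec lets glob.glob() match candidate siginfo files directly,
replacing the old os.walk()+fnmatch scan of the whole sstate cache:

    import glob

    filespec = '/srv/sstate-cache/01/sstate:zlib:*:*:*:*:*:*_populate_sysroot.tgz.siginfo'
    for fullpath in glob.glob(filespec):
        print(fullpath)
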
@@ -348,7 +364,7 @@
     Also returns the datastore that can be used to query related variables.
     """
     d2 = d.createCopy()
-    extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info', True)
+    extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info')
     if extrainf:
         d2.setVar("SSTATE_MANMACH", extrainf)
     return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
diff --git a/import-layers/yocto-poky/meta/lib/oe/terminal.py b/import-layers/yocto-poky/meta/lib/oe/terminal.py
index 3c8ef59..2f18ec0 100644
--- a/import-layers/yocto-poky/meta/lib/oe/terminal.py
+++ b/import-layers/yocto-poky/meta/lib/oe/terminal.py
@@ -11,7 +11,8 @@
     pass
 
 class NoSupportedTerminals(Exception):
-    pass
+    def __init__(self, terms):
+        self.terms = terms
 
 
 class Registry(oe.classutils.ClassRegistry):
@@ -61,31 +62,10 @@
         # Once fixed on the gnome-terminal project, this should be removed.
         if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
 
-        # We need to know when the command completes but gnome-terminal gives us no way 
-        # to do this. We therefore write the pid to a file using a "phonehome" wrapper
-        # script, then monitor the pid until it exits. Thanks gnome!
-        import tempfile
-        pidfile = tempfile.NamedTemporaryFile(delete = False).name
-        try:
-            sh_cmd = "oe-gnome-terminal-phonehome " + pidfile + " " + sh_cmd
-            XTerminal.__init__(self, sh_cmd, title, env, d)
-            while os.stat(pidfile).st_size <= 0:
-                continue
-            with open(pidfile, "r") as f:
-                pid = int(f.readline())
-        finally:
-            os.unlink(pidfile)
-
-        import time
-        while True:
-            try:
-                os.kill(pid, 0)
-                time.sleep(0.1)
-            except OSError:
-               return
+        XTerminal.__init__(self, sh_cmd, title, env, d)
 
 class Mate(XTerminal):
-    command = 'mate-terminal -t "{title}" -x {command}'
+    command = 'mate-terminal --disable-factory -t "{title}" -x {command}'
     priority = 2
 
 class Xfce(XTerminal):
@@ -97,7 +77,7 @@
     priority = 2
 
 class Konsole(XTerminal):
-    command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
+    command = 'konsole --separate --workdir . -p tabtitle="{title}" -e {command}'
     priority = 2
 
     def __init__(self, sh_cmd, title=None, env=None, d=None):
@@ -106,6 +86,9 @@
         if vernum and LooseVersion(vernum) < '2.0.0':
             # Konsole from KDE 3.x
             self.command = 'konsole -T "{title}" -e {command}'
+        elif vernum and LooseVersion(vernum) < '16.08.1':
+            # Konsole pre 16.08.1 has --nofork
+            self.command = 'konsole --nofork --workdir . -p tabtitle="{title}" -e {command}'
         XTerminal.__init__(self, sh_cmd, title, env, d)
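A minimal sketch of the version gate above: LooseVersion compares dotted
version strings component-wise, so Konsole '2.1.3' sorts below
'16.08.1' even though a plain string comparison would not:

    from distutils.version import LooseVersion

    assert LooseVersion('2.1.3') < '16.08.1'  # numeric, component-wise
    assert not ('2.1.3' < '16.08.1')          # naive string compare gets it wrong
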
 
 class XTerm(XTerminal):
@@ -192,7 +175,7 @@
     priority = 3
 
     def __init__(self, sh_cmd, title=None, env=None, d=None):
-        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
+        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD')
         if self.command:
             if not '{command}' in self.command:
                 self.command += ' {command}'
@@ -206,6 +189,14 @@
 def prioritized():
     return Registry.prioritized()
 
+def get_cmd_list():
+    terms = Registry.prioritized()
+    cmds = []
+    for term in terms:
+        if term.command:
+            cmds.append(term.command)
+    return cmds
+
 def spawn_preferred(sh_cmd, title=None, env=None, d=None):
     """Spawn the first supported terminal, by priority"""
     for terminal in prioritized():
@@ -215,7 +206,7 @@
         except UnsupportedTerminal:
             continue
     else:
-        raise NoSupportedTerminals()
+        raise NoSupportedTerminals(get_cmd_list())
 
 def spawn(name, sh_cmd, title=None, env=None, d=None):
     """Spawn the specified terminal, by name"""
@@ -225,12 +216,36 @@
     except KeyError:
         raise UnsupportedTerminal(name)
 
-    pipe = terminal(sh_cmd, title, env, d)
-    output = pipe.communicate()[0]
-    if output:
-        output = output.decode("utf-8")
-    if pipe.returncode != 0:
-        raise ExecutionError(sh_cmd, pipe.returncode, output)
+    # We need to know when the command completes but some terminals (at least
+    # gnome and tmux) give us no way to do this. We therefore write the pid
+    # to a file using a "phonehome" wrapper script, then monitor the pid
+    # until it exits.
+    import tempfile
+    import time
+    pidfile = tempfile.NamedTemporaryFile(delete = False).name
+    try:
+        sh_cmd = bb.utils.which(os.getenv('PATH'), "oe-gnome-terminal-phonehome") + " " + pidfile + " " + sh_cmd
+        pipe = terminal(sh_cmd, title, env, d)
+        output = pipe.communicate()[0]
+        if output:
+            output = output.decode("utf-8")
+        if pipe.returncode != 0:
+            raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+        while os.stat(pidfile).st_size <= 0:
+            time.sleep(0.01)
+            continue
+        with open(pidfile, "r") as f:
+            pid = int(f.readline())
+    finally:
+        os.unlink(pidfile)
+
+    while True:
+        try:
+            os.kill(pid, 0)
+            time.sleep(0.1)
+        except OSError:
+            return
 
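A minimal sketch of the liveness poll above: signal 0 delivers nothing
but still performs the existence check, so os.kill(pid, 0) raises
OSError once the process recorded in the pidfile has exited:

    import os
    import time

    def wait_for_exit(pid, interval=0.1):
        while True:
            try:
                os.kill(pid, 0)
                time.sleep(interval)
            except OSError:
                return
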
 def check_tmux_pane_size(tmux):
     import subprocess as sub
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/__init__.py b/import-layers/yocto-poky/meta/lib/oe/tests/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/import-layers/yocto-poky/meta/lib/oe/tests/__init__.py
+++ /dev/null
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_elf.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_elf.py
deleted file mode 100644
index 1f59037..0000000
--- a/import-layers/yocto-poky/meta/lib/oe/tests/test_elf.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import unittest
-import oe.qa
-
-class TestElf(unittest.TestCase):
-    def test_machine_name(self):
-        """
-        Test elf_machine_to_string()
-        """
-        self.assertEqual(oe.qa.elf_machine_to_string(0x02), "SPARC")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x03), "x86")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x08), "MIPS")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x14), "PowerPC")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x28), "ARM")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x2A), "SuperH")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x32), "IA-64")
-        self.assertEqual(oe.qa.elf_machine_to_string(0x3E), "x86-64")
-        self.assertEqual(oe.qa.elf_machine_to_string(0xB7), "AArch64")
-
-        self.assertEqual(oe.qa.elf_machine_to_string(0x00), "Unknown (0)")
-        self.assertEqual(oe.qa.elf_machine_to_string(0xDEADBEEF), "Unknown (3735928559)")
-        self.assertEqual(oe.qa.elf_machine_to_string("foobar"), "Unknown ('foobar')")
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_license.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_license.py
deleted file mode 100644
index c388886..0000000
--- a/import-layers/yocto-poky/meta/lib/oe/tests/test_license.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import unittest
-import oe.license
-
-class SeenVisitor(oe.license.LicenseVisitor):
-    def __init__(self):
-        self.seen = []
-        oe.license.LicenseVisitor.__init__(self)
-
-    def visit_Str(self, node):
-        self.seen.append(node.s)
-
-class TestSingleLicense(unittest.TestCase):
-    licenses = [
-        "GPLv2",
-        "LGPL-2.0",
-        "Artistic",
-        "MIT",
-        "GPLv3+",
-        "FOO_BAR",
-    ]
-    invalid_licenses = ["GPL/BSD"]
-
-    @staticmethod
-    def parse(licensestr):
-        visitor = SeenVisitor()
-        visitor.visit_string(licensestr)
-        return visitor.seen
-
-    def test_single_licenses(self):
-        for license in self.licenses:
-            licenses = self.parse(license)
-            self.assertListEqual(licenses, [license])
-
-    def test_invalid_licenses(self):
-        for license in self.invalid_licenses:
-            with self.assertRaises(oe.license.InvalidLicense) as cm:
-                self.parse(license)
-            self.assertEqual(cm.exception.license, license)
-
-class TestSimpleCombinations(unittest.TestCase):
-    tests = {
-        "FOO&BAR": ["FOO", "BAR"],
-        "BAZ & MOO": ["BAZ", "MOO"],
-        "ALPHA|BETA": ["ALPHA"],
-        "BAZ&MOO|FOO": ["FOO"],
-        "FOO&BAR|BAZ": ["FOO", "BAR"],
-    }
-    preferred = ["ALPHA", "FOO", "BAR"]
-
-    def test_tests(self):
-        def choose(a, b):
-            if all(lic in self.preferred for lic in b):
-                return b
-            else:
-                return a
-
-        for license, expected in self.tests.items():
-            licenses = oe.license.flattened_licenses(license, choose)
-            self.assertListEqual(licenses, expected)
-
-class TestComplexCombinations(TestSimpleCombinations):
-    tests = {
-        "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
-        "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
-        "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
-        "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
-    }
-    preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_path.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_path.py
deleted file mode 100644
index 44d0681..0000000
--- a/import-layers/yocto-poky/meta/lib/oe/tests/test_path.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import unittest
-import oe, oe.path
-import tempfile
-import os
-import errno
-import shutil
-
-class TestRealPath(unittest.TestCase):
-    DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
-    FILES = [ "etc/passwd", "b/file" ]
-    LINKS = [
-        ( "bin",             "/usr/bin",             "/usr/bin" ),
-        ( "binX",            "usr/binX",             "/usr/binX" ),
-        ( "c",               "broken",               "/broken" ),
-        ( "etc/passwd-1",    "passwd",               "/etc/passwd" ),
-        ( "etc/passwd-2",    "passwd-1",             "/etc/passwd" ),
-        ( "etc/passwd-3",    "/etc/passwd-1",        "/etc/passwd" ),
-        ( "etc/shadow-1",    "/etc/shadow",          "/etc/shadow" ),
-        ( "etc/shadow-2",    "/etc/shadow-1",        "/etc/shadow" ),
-        ( "prog-A",          "bin/prog-A",           "/usr/bin/prog-A" ),
-        ( "prog-B",          "/bin/prog-B",          "/usr/bin/prog-B" ),
-        ( "usr/bin/prog-C",  "../../sbin/prog-C",    "/sbin/prog-C" ),
-        ( "usr/bin/prog-D",  "/sbin/prog-D",         "/sbin/prog-D" ),
-        ( "usr/binX/prog-E", "../sbin/prog-E",       None ),
-        ( "usr/bin/prog-F",  "../../../sbin/prog-F", "/sbin/prog-F" ),
-        ( "loop",            "a/loop",               None ),
-        ( "a/loop",          "../loop",              None ),
-        ( "b/test",          "file/foo",             "/b/file/foo" ),
-    ]
-
-    LINKS_PHYS = [
-        ( "./",          "/",                "" ),
-        ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
-    ]
-
-    EXCEPTIONS = [
-        ( "loop",   errno.ELOOP ),
-        ( "b/test", errno.ENOENT ),
-    ]
-
-    def __del__(self):
-        try:
-            #os.system("tree -F %s" % self.tmpdir)
-            shutil.rmtree(self.tmpdir)
-        except:
-            pass
-
-    def setUp(self):
-        self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
-        self.root = os.path.join(self.tmpdir, "R")
-
-        os.mkdir(os.path.join(self.tmpdir, "_real"))
-        os.symlink("_real", self.root)
-
-        for d in self.DIRS:
-            os.mkdir(os.path.join(self.root, d))
-        for f in self.FILES:
-            open(os.path.join(self.root, f), "w")
-        for l in self.LINKS:
-            os.symlink(l[1], os.path.join(self.root, l[0]))
-
-    def __realpath(self, file, use_physdir, assume_dir = True):
-        return oe.path.realpath(os.path.join(self.root, file), self.root,
-                                use_physdir, assume_dir = assume_dir)
-
-    def test_norm(self):
-        for l in self.LINKS:
-            if l[2] == None:
-                continue
-
-            target_p = self.__realpath(l[0], True)
-            target_l = self.__realpath(l[0], False)
-
-            if l[2] != False:
-                self.assertEqual(target_p, target_l)
-                self.assertEqual(l[2], target_p[len(self.root):])
-
-    def test_phys(self):
-        for l in self.LINKS_PHYS:
-            target_p = self.__realpath(l[0], True)
-            target_l = self.__realpath(l[0], False)
-
-            self.assertEqual(l[1], target_p[len(self.root):])
-            self.assertEqual(l[2], target_l[len(self.root):])
-
-    def test_loop(self):
-        for e in self.EXCEPTIONS:
-            self.assertRaisesRegex(OSError, r'\[Errno %u\]' % e[1],
-                                    self.__realpath, e[0], False, False)
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_types.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_types.py
deleted file mode 100644
index 367cc30..0000000
--- a/import-layers/yocto-poky/meta/lib/oe/tests/test_types.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import unittest
-from oe.maketype import create, factory
-
-class TestTypes(unittest.TestCase):
-    def assertIsInstance(self, obj, cls):
-        return self.assertTrue(isinstance(obj, cls))
-
-    def assertIsNot(self, obj, other):
-        return self.assertFalse(obj is other)
-
-    def assertFactoryCreated(self, value, type, **flags):
-        cls = factory(type)
-        self.assertIsNot(cls, None)
-        self.assertIsInstance(create(value, type, **flags), cls)
-
-class TestBooleanType(TestTypes):
-    def test_invalid(self):
-        self.assertRaises(ValueError, create, '', 'boolean')
-        self.assertRaises(ValueError, create, 'foo', 'boolean')
-        self.assertRaises(TypeError, create, object(), 'boolean')
-
-    def test_true(self):
-        self.assertTrue(create('y', 'boolean'))
-        self.assertTrue(create('yes', 'boolean'))
-        self.assertTrue(create('1', 'boolean'))
-        self.assertTrue(create('t', 'boolean'))
-        self.assertTrue(create('true', 'boolean'))
-        self.assertTrue(create('TRUE', 'boolean'))
-        self.assertTrue(create('truE', 'boolean'))
-
-    def test_false(self):
-        self.assertFalse(create('n', 'boolean'))
-        self.assertFalse(create('no', 'boolean'))
-        self.assertFalse(create('0', 'boolean'))
-        self.assertFalse(create('f', 'boolean'))
-        self.assertFalse(create('false', 'boolean'))
-        self.assertFalse(create('FALSE', 'boolean'))
-        self.assertFalse(create('faLse', 'boolean'))
-
-    def test_bool_equality(self):
-        self.assertEqual(create('n', 'boolean'), False)
-        self.assertNotEqual(create('n', 'boolean'), True)
-        self.assertEqual(create('y', 'boolean'), True)
-        self.assertNotEqual(create('y', 'boolean'), False)
-
-class TestList(TestTypes):
-    def assertListEqual(self, value, valid, sep=None):
-        obj = create(value, 'list', separator=sep)
-        self.assertEqual(obj, valid)
-        if sep is not None:
-            self.assertEqual(obj.separator, sep)
-        self.assertEqual(str(obj), obj.separator.join(obj))
-
-    def test_list_nosep(self):
-        testlist = ['alpha', 'beta', 'theta']
-        self.assertListEqual('alpha beta theta', testlist)
-        self.assertListEqual('alpha  beta\ttheta', testlist)
-        self.assertListEqual('alpha', ['alpha'])
-
-    def test_list_usersep(self):
-        self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
-        self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py b/import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py
deleted file mode 100644
index 5d9ac52..0000000
--- a/import-layers/yocto-poky/meta/lib/oe/tests/test_utils.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import unittest
-from oe.utils import packages_filter_out_system
-
-class TestPackagesFilterOutSystem(unittest.TestCase):
-    def test_filter(self):
-        """
-        Test that oe.utils.packages_filter_out_system works.
-        """
-        try:
-            import bb
-        except ImportError:
-            self.skipTest("Cannot import bb")
-
-        d = bb.data_smart.DataSmart()
-        d.setVar("PN", "foo")
-
-        d.setVar("PACKAGES", "foo foo-doc foo-dev")
-        pkgs = packages_filter_out_system(d)
-        self.assertEqual(pkgs, [])
-
-        d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
-        pkgs = packages_filter_out_system(d)
-        self.assertEqual(pkgs, ["foo-data"])
-
-        d.setVar("PACKAGES", "foo foo-locale-en-gb")
-        pkgs = packages_filter_out_system(d)
-        self.assertEqual(pkgs, [])
-
-        d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
-        pkgs = packages_filter_out_system(d)
-        self.assertEqual(pkgs, ["foo-data"])
-
-
-class TestTrimVersion(unittest.TestCase):
-    def test_version_exception(self):
-        with self.assertRaises(TypeError):
-            trim_version(None, 2)
-        with self.assertRaises(TypeError):
-            trim_version((1, 2, 3), 2)
-
-    def test_num_exception(self):
-        with self.assertRaises(ValueError):
-            trim_version("1.2.3", 0)
-        with self.assertRaises(ValueError):
-            trim_version("1.2.3", -1)
-
-    def test_valid(self):
-        self.assertEqual(trim_version("1.2.3", 1), "1")
-        self.assertEqual(trim_version("1.2.3", 2), "1.2")
-        self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
-        self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
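The trim_version() cases removed above fully specify the helper's contract, and the function itself stays in oe/utils.py (see the hunks below). A minimal sketch matching the deleted assertions:

    # Reconstructed from the deleted tests; the real implementation lives
    # in oe/utils.py. Keeps at most num_parts dot-separated components.
    def trim_version(version, num_parts=2):
        if type(version) is not str:
            raise TypeError('version must be a string, not %s' % type(version))
        if num_parts < 1:
            raise ValueError('num_parts must be a positive integer')
        return '.'.join(version.split('.')[:num_parts])

    assert trim_version('1.2.3', 1) == '1'
    assert trim_version('1.2.3', 2) == '1.2'
    assert trim_version('1.2.3', 4) == '1.2.3'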
diff --git a/import-layers/yocto-poky/meta/lib/oe/utils.py b/import-layers/yocto-poky/meta/lib/oe/utils.py
index 36cf74f..330a5ff 100644
--- a/import-layers/yocto-poky/meta/lib/oe/utils.py
+++ b/import-layers/yocto-poky/meta/lib/oe/utils.py
@@ -1,9 +1,4 @@
-try:
-    # Python 2
-    import commands as cmdstatus
-except ImportError:
-    # Python 3
-    import subprocess as cmdstatus
+import subprocess
 
 def read_file(filename):
     try:
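Dropping the Python 2 fallback is safe: subprocess.getstatusoutput() has been part of Python 3 since 3.0 and returns the same (status, output) pair the old commands module did, so the getstatusoutput() wrapper further down keeps working unchanged. For example:

    import subprocess

    # Runs the command through the shell; the output has its trailing
    # newline stripped, just as commands.getstatusoutput() did.
    status, output = subprocess.getstatusoutput('echo hello')
    assert status == 0
    assert output == 'hello'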
@@ -23,27 +18,27 @@
         return iffalse
 
 def conditional(variable, checkvalue, truevalue, falsevalue, d):
-    if d.getVar(variable, True) == checkvalue:
+    if d.getVar(variable) == checkvalue:
         return truevalue
     else:
         return falsevalue
 
 def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
-    if float(d.getVar(variable, True)) <= float(checkvalue):
+    if float(d.getVar(variable)) <= float(checkvalue):
         return truevalue
     else:
         return falsevalue
 
 def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
-    result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
+    result = bb.utils.vercmp_string(d.getVar(variable), checkvalue)
     if result <= 0:
         return truevalue
     else:
         return falsevalue
 
 def both_contain(variable1, variable2, checkvalue, d):
-    val1 = d.getVar(variable1, True)
-    val2 = d.getVar(variable2, True)
+    val1 = d.getVar(variable1)
+    val2 = d.getVar(variable2)
     val1 = set(val1.split())
     val2 = set(val2.split())
     if isinstance(checkvalue, str):
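The mechanical edit repeated through this file (and most of this series) tracks the BitBake API change in this release: getVar() now expands by default, so the explicit True argument is redundant and can be dropped. A small illustration against a bare datastore -- the variable names here are made up for the example:

    import bb.data_smart

    d = bb.data_smart.DataSmart()
    d.setVar('PN', 'foo')
    d.setVar('BPN', '${PN}')

    assert d.getVar('BPN', True) == 'foo'     # old idiom, still accepted
    assert d.getVar('BPN') == 'foo'           # new default: expand=True
    assert d.getVar('BPN', False) == '${PN}'  # unexpanded value on request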
@@ -66,8 +61,8 @@
     s3 = set_intersect(s1, s2)
     => s3 = "b c"
     """
-    val1 = set(d.getVar(variable1, True).split())
-    val2 = set(d.getVar(variable2, True).split())
+    val1 = set(d.getVar(variable1).split())
+    val2 = set(d.getVar(variable2).split())
     return " ".join(val1 & val2)
 
 def prune_suffix(var, suffixes, d):
@@ -77,7 +72,7 @@
         if var.endswith(suffix):
             var = var.replace(suffix, "")
 
-    prefix = d.getVar("MLPREFIX", True)
+    prefix = d.getVar("MLPREFIX")
     if prefix and var.startswith(prefix):
         var = var.replace(prefix, "")
 
@@ -102,6 +97,10 @@
         return False
     raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
 
+def build_depends_string(depends, task):
+    """Append a taskname to a string of dependencies as used by the [depends] flag"""
+    return " ".join(dep + ":" + task for dep in depends.split())
+
 def inherits(d, *classes):
     """Return True if the metadata inherits any of the specified classes"""
     return any(bb.data.inherits_class(cls, d) for cls in classes)
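The new build_depends_string() helper simply suffixes every dependency with a task name, in the form the [depends] varflag expects. For example (the dependency names are illustrative):

    deps = build_depends_string('virtual/kernel mtools-native',
                                'do_populate_sysroot')
    assert deps == ('virtual/kernel:do_populate_sysroot '
                    'mtools-native:do_populate_sysroot')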
@@ -115,9 +114,9 @@
     # disturbing distributions that have already set DISTRO_FEATURES.
     # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL should
     # add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
-    features = (d.getVar(var, True) or "").split()
-    backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
-    considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
+    features = (d.getVar(var) or "").split()
+    backfill = (d.getVar(var+"_BACKFILL") or "").split()
+    considered = (d.getVar(var+"_BACKFILL_CONSIDERED") or "").split()
 
     addfeatures = []
     for feature in backfill:
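The backfill rule itself is untouched: a feature listed in ${var}_BACKFILL is appended unless it is already set or the distro opted out via ${var}_BACKFILL_CONSIDERED. A standalone sketch of that rule over plain strings:

    # Sketch of the backfill decision only; the real code reads from and
    # writes back to a datastore variable rather than plain strings.
    def backfill(features, backfill_list, considered):
        features = features.split()
        for feature in backfill_list.split():
            if feature not in features and feature not in considered.split():
                features.append(feature)
        return ' '.join(features)

    # 'pulseaudio' gets backfilled; 'sysvinit' was explicitly considered.
    assert backfill('alsa', 'pulseaudio sysvinit', 'sysvinit') == 'alsa pulseaudio'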
@@ -133,18 +132,18 @@
     Return a list of packages from PACKAGES with the "system" packages such as
     PN-dbg PN-doc PN-locale-en-gb removed.
     """
-    pn = d.getVar('PN', True)
+    pn = d.getVar('PN')
     blacklist = [pn + suffix for suffix in ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev')]
     localepkg = pn + "-locale-"
     pkgs = []
 
-    for pkg in d.getVar('PACKAGES', True).split():
+    for pkg in d.getVar('PACKAGES').split():
         if pkg not in blacklist and localepkg not in pkg:
             pkgs.append(pkg)
     return pkgs
 
 def getstatusoutput(cmd):
-    return cmdstatus.getstatusoutput(cmd)
+    return subprocess.getstatusoutput(cmd)
 
 
 def trim_version(version, num_parts=2):
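packages_filter_out_system() is unchanged apart from the getVar() cleanup; the deleted test_utils.py above already documented its behaviour. Restated as a usage example (requires a BitBake datastore):

    import bb.data_smart
    from oe.utils import packages_filter_out_system

    d = bb.data_smart.DataSmart()
    d.setVar('PN', 'foo')
    d.setVar('PACKAGES', 'foo foo-doc foo-data foo-dev foo-locale-en-gb')
    # Only non-"system" packages survive the filter.
    assert packages_filter_out_system(d) == ['foo-data']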
@@ -233,11 +232,10 @@
 def host_gcc_version(d):
     import re, subprocess
 
-    compiler = d.getVar("BUILD_CC", True)
-
+    compiler = d.getVar("BUILD_CC")
     try:
         env = os.environ.copy()
-        env["PATH"] = d.getVar("PATH", True)
+        env["PATH"] = d.getVar("PATH")
         output = subprocess.check_output("%s --version" % compiler, shell=True, env=env).decode("utf-8")
     except subprocess.CalledProcessError as e:
         bb.fatal("Error running %s --version: %s" % (compiler, e.output.decode("utf-8")))
@@ -321,8 +319,8 @@
         bb.utils.remove(ldsoconf)
     bb.utils.mkdirhier(os.path.dirname(ldsoconf))
     with open(ldsoconf, "w") as f:
-        f.write(d.getVar("base_libdir", True) + '\n')
-        f.write(d.getVar("libdir", True) + '\n')
+        f.write(d.getVar("base_libdir") + '\n')
+        f.write(d.getVar("libdir") + '\n')
 
 class ImageQAFailed(bb.build.FuncFailed):
     def __init__(self, description, name=None, logfile=None):