Squashed 'yocto-poky/' content from commit ea562de

git-subtree-dir: yocto-poky
git-subtree-split: ea562de57590c966cd5a75fda8defecd397e6436
diff --git a/meta/lib/oe/__init__.py b/meta/lib/oe/__init__.py
new file mode 100644
index 0000000..3ad9513
--- /dev/null
+++ b/meta/lib/oe/__init__.py
@@ -0,0 +1,2 @@
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/meta/lib/oe/buildhistory_analysis.py b/meta/lib/oe/buildhistory_analysis.py
new file mode 100644
index 0000000..5395c76
--- /dev/null
+++ b/meta/lib/oe/buildhistory_analysis.py
@@ -0,0 +1,456 @@
+# Report significant differences in the buildhistory repository since a specific revision
+#
+# Copyright (C) 2012 Intel Corporation
+# Author: Paul Eggleton <paul.eggleton@linux.intel.com>
+#
+# Note: requires GitPython 0.3.1+
+#
+# You can use this from the command line by running scripts/buildhistory-diff
+#
+
+import sys
+import os.path
+import difflib
+import git
+import re
+import bb.utils
+
+
+# How to display fields
+list_fields = ['DEPENDS', 'RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS', 'FILES', 'FILELIST', 'USER_CLASSES', 'IMAGE_CLASSES', 'IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+list_order_fields = ['PACKAGES']
+defaultval_map = {'PKG': 'PKG', 'PKGE': 'PE', 'PKGV': 'PV', 'PKGR': 'PR'}
+numeric_fields = ['PKGSIZE', 'IMAGESIZE']
+# Fields to monitor
+monitor_fields = ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RREPLACES', 'RCONFLICTS', 'PACKAGES', 'FILELIST', 'PKGSIZE', 'IMAGESIZE', 'PKG']
+ver_monitor_fields = ['PKGE', 'PKGV', 'PKGR']
+# Percentage change to alert for numeric fields
+monitor_numeric_threshold = 10
+# Image files to monitor (note that image-info.txt is handled separately)
+img_monitor_files = ['installed-package-names.txt', 'files-in-image.txt']
+# Related context fields for reporting (note: PE, PV & PR are always reported for monitored package fields)
+related_fields = {}
+related_fields['RDEPENDS'] = ['DEPENDS']
+related_fields['RRECOMMENDS'] = ['DEPENDS']
+related_fields['FILELIST'] = ['FILES']
+related_fields['PKGSIZE'] = ['FILELIST']
+related_fields['files-in-image.txt'] = ['installed-package-names.txt', 'USER_CLASSES', 'IMAGE_CLASSES', 'ROOTFS_POSTPROCESS_COMMAND', 'IMAGE_POSTPROCESS_COMMAND']
+related_fields['installed-package-names.txt'] = ['IMAGE_FEATURES', 'IMAGE_LINGUAS', 'IMAGE_INSTALL', 'BAD_RECOMMENDATIONS', 'NO_RECOMMENDATIONS', 'PACKAGE_EXCLUDE']
+
+
+class ChangeRecord:
+    def __init__(self, path, fieldname, oldvalue, newvalue, monitored):
+        self.path = path
+        self.fieldname = fieldname
+        self.oldvalue = oldvalue
+        self.newvalue = newvalue
+        self.monitored = monitored
+        self.related = []
+        self.filechanges = None
+
+    def __str__(self):
+        return self._str_internal(True)
+
+    def _str_internal(self, outer):
+        if outer:
+            if '/image-files/' in self.path:
+                prefix = '%s: ' % self.path.split('/image-files/')[0]
+            else:
+                prefix = '%s: ' % self.path
+        else:
+            prefix = ''
+
+        def pkglist_combine(depver):
+            pkglist = []
+            for k,v in depver.iteritems():
+                if v:
+                    pkglist.append("%s (%s)" % (k,v))
+                else:
+                    pkglist.append(k)
+            return pkglist
+
+        if self.fieldname in list_fields or self.fieldname in list_order_fields:
+            if self.fieldname in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+                (depvera, depverb) = compare_pkg_lists(self.oldvalue, self.newvalue)
+                aitems = pkglist_combine(depvera)
+                bitems = pkglist_combine(depverb)
+            else:
+                aitems = self.oldvalue.split()
+                bitems = self.newvalue.split()
+            removed = list(set(aitems) - set(bitems))
+            added = list(set(bitems) - set(aitems))
+
+            if removed or added:
+                if removed and not bitems:
+                    out = '%s: removed all items "%s"' % (self.fieldname, ' '.join(removed))
+                else:
+                    out = '%s:%s%s' % (self.fieldname, ' removed "%s"' % ' '.join(removed) if removed else '', ' added "%s"' % ' '.join(added) if added else '')
+            else:
+                out = '%s changed order' % self.fieldname
+        elif self.fieldname in numeric_fields:
+            aval = int(self.oldvalue or 0)
+            bval = int(self.newvalue or 0)
+            if aval != 0:
+                percentchg = ((bval - aval) / float(aval)) * 100
+            else:
+                percentchg = 100
+            out = '%s changed from %s to %s (%s%d%%)' % (self.fieldname, self.oldvalue or "''", self.newvalue or "''", '+' if percentchg > 0 else '', percentchg)
+        elif self.fieldname in defaultval_map:
+            out = '%s changed from %s to %s' % (self.fieldname, self.oldvalue, self.newvalue)
+            if self.fieldname == 'PKG' and '[default]' in self.newvalue:
+                out += ' - may indicate debian renaming failure'
+        elif self.fieldname in ['pkg_preinst', 'pkg_postinst', 'pkg_prerm', 'pkg_postrm']:
+            if self.oldvalue and self.newvalue:
+                out = '%s changed:\n  ' % self.fieldname
+            elif self.newvalue:
+                out = '%s added:\n  ' % self.fieldname
+            elif self.oldvalue:
+                out = '%s cleared:\n  ' % self.fieldname
+            alines = self.oldvalue.splitlines()
+            blines = self.newvalue.splitlines()
+            diff = difflib.unified_diff(alines, blines, self.fieldname, self.fieldname, lineterm='')
+            out += '\n  '.join(list(diff)[2:])
+            out += '\n  --'
+        elif self.fieldname in img_monitor_files or '/image-files/' in self.path:
+            fieldname = self.fieldname
+            if '/image-files/' in self.path:
+                fieldname = os.path.join('/' + self.path.split('/image-files/')[1], self.fieldname)
+                out = 'Changes to %s:\n  ' % fieldname
+            else:
+                if outer:
+                    prefix = 'Changes to %s ' % self.path
+                out = '(%s):\n  ' % self.fieldname
+            if self.filechanges:
+                out += '\n  '.join(['%s' % i for i in self.filechanges])
+            else:
+                alines = self.oldvalue.splitlines()
+                blines = self.newvalue.splitlines()
+                diff = difflib.unified_diff(alines, blines, fieldname, fieldname, lineterm='')
+                out += '\n  '.join(list(diff))
+                out += '\n  --'
+        else:
+            out = '%s changed from "%s" to "%s"' % (self.fieldname, self.oldvalue, self.newvalue)
+
+        if self.related:
+            for chg in self.related:
+                if not outer and chg.fieldname in ['PE', 'PV', 'PR']:
+                    continue
+                for line in chg._str_internal(False).splitlines():
+                    out += '\n  * %s' % line
+
+        return '%s%s' % (prefix, out)
+
+class FileChange:
+    changetype_add = 'A'
+    changetype_remove = 'R'
+    changetype_type = 'T'
+    changetype_perms = 'P'
+    changetype_ownergroup = 'O'
+    changetype_link = 'L'
+
+    def __init__(self, path, changetype, oldvalue = None, newvalue = None):
+        self.path = path
+        self.changetype = changetype
+        self.oldvalue = oldvalue
+        self.newvalue = newvalue
+
+    def _ftype_str(self, ftype):
+        if ftype == '-':
+            return 'file'
+        elif ftype == 'd':
+            return 'directory'
+        elif ftype == 'l':
+            return 'symlink'
+        elif ftype == 'c':
+            return 'char device'
+        elif ftype == 'b':
+            return 'block device'
+        elif ftype == 'p':
+            return 'fifo'
+        elif ftype == 's':
+            return 'socket'
+        else:
+            return 'unknown (%s)' % ftype
+
+    def __str__(self):
+        if self.changetype == self.changetype_add:
+            return '%s was added' % self.path
+        elif self.changetype == self.changetype_remove:
+            return '%s was removed' % self.path
+        elif self.changetype == self.changetype_type:
+            return '%s changed type from %s to %s' % (self.path, self._ftype_str(self.oldvalue), self._ftype_str(self.newvalue))
+        elif self.changetype == self.changetype_perms:
+            return '%s changed permissions from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+        elif self.changetype == self.changetype_ownergroup:
+            return '%s changed owner/group from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+        elif self.changetype == self.changetype_link:
+            return '%s changed symlink target from %s to %s' % (self.path, self.oldvalue, self.newvalue)
+        else:
+            return '%s changed (unknown)' % self.path
+
+
+def blob_to_dict(blob):
+    alines = blob.data_stream.read().splitlines()
+    adict = {}
+    for line in alines:
+        splitv = [i.strip() for i in line.split('=',1)]
+        if len(splitv) > 1:
+            adict[splitv[0]] = splitv[1]
+    return adict
+
+
+def file_list_to_dict(lines):
+    adict = {}
+    for line in lines:
+        # Leave the last few fields intact so we handle file names containing spaces
+        splitv = line.split(None,4)
+        # Grab the path and remove the leading .
+        path = splitv[4][1:].strip()
+        # Handle symlinks
+        if ' -> ' in path:
+            target = path.split(' -> ')[1]
+            path = path.split(' -> ')[0]
+            adict[path] = splitv[0:3] + [target]
+        else:
+            adict[path] = splitv[0:3]
+    return adict
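+
+# Illustrative example (the input line format is inferred from the parsing
+# above; the exact line below is a placeholder):
+#
+#   file_list_to_dict(['-rw-r--r-- root root 123 ./etc/hostname'])
+#   # -> {'/etc/hostname': ['-rw-r--r--', 'root', 'root']}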
+
+
+def compare_file_lists(alines, blines):
+    adict = file_list_to_dict(alines)
+    bdict = file_list_to_dict(blines)
+    filechanges = []
+    for path, splitv in adict.iteritems():
+        newsplitv = bdict.pop(path, None)
+        if newsplitv:
+            # Check type
+            oldvalue = splitv[0][0]
+            newvalue = newsplitv[0][0]
+            if oldvalue != newvalue:
+                filechanges.append(FileChange(path, FileChange.changetype_type, oldvalue, newvalue))
+            # Check permissions
+            oldvalue = splitv[0][1:]
+            newvalue = newsplitv[0][1:]
+            if oldvalue != newvalue:
+                filechanges.append(FileChange(path, FileChange.changetype_perms, oldvalue, newvalue))
+            # Check owner/group
+            oldvalue = '%s/%s' % (splitv[1], splitv[2])
+            newvalue = '%s/%s' % (newsplitv[1], newsplitv[2])
+            if oldvalue != newvalue:
+                filechanges.append(FileChange(path, FileChange.changetype_ownergroup, oldvalue, newvalue))
+            # Check symlink target
+            if newsplitv[0][0] == 'l':
+                if len(splitv) > 3:
+                    oldvalue = splitv[3]
+                else:
+                    oldvalue = None
+                newvalue = newsplitv[3]
+                if oldvalue != newvalue:
+                    filechanges.append(FileChange(path, FileChange.changetype_link, oldvalue, newvalue))
+        else:
+            filechanges.append(FileChange(path, FileChange.changetype_remove))
+
+    # Whatever is left over has been added
+    for path in bdict:
+        filechanges.append(FileChange(path, FileChange.changetype_add))
+
+    return filechanges
+
+
+def compare_lists(alines, blines):
+    removed = list(set(alines) - set(blines))
+    added = list(set(blines) - set(alines))
+
+    filechanges = []
+    for pkg in removed:
+        filechanges.append(FileChange(pkg, FileChange.changetype_remove))
+    for pkg in added:
+        filechanges.append(FileChange(pkg, FileChange.changetype_add))
+
+    return filechanges
+
+
+def compare_pkg_lists(astr, bstr):
+    depvera = bb.utils.explode_dep_versions2(astr)
+    depverb = bb.utils.explode_dep_versions2(bstr)
+
+    # Strip out changes where the version has increased
+    remove = []
+    for k in depvera:
+        if k in depverb:
+            dva = depvera[k]
+            dvb = depverb[k]
+            if dva and dvb and len(dva) == len(dvb):
+                # Since length is the same, sort so that prefixes (e.g. >=) will line up
+                dva.sort()
+                dvb.sort()
+                removeit = True
+                for dvai, dvbi in zip(dva, dvb):
+                    if dvai != dvbi:
+                        aiprefix = dvai.split(' ')[0]
+                        biprefix = dvbi.split(' ')[0]
+                        if aiprefix == biprefix and aiprefix in ['>=', '=']:
+                            if bb.utils.vercmp(bb.utils.split_version(dvai), bb.utils.split_version(dvbi)) > 0:
+                                removeit = False
+                                break
+                        else:
+                            removeit = False
+                            break
+                if removeit:
+                    remove.append(k)
+
+    for k in remove:
+        depvera.pop(k)
+        depverb.pop(k)
+
+    return (depvera, depverb)
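+
+# Illustrative example: comparing "foo (>= 1.0) bar" against
+# "foo (>= 1.1) bar" drops 'foo' from both returned dicts, since only its
+# version increased; 'bar' remains in both, unchanged:
+#
+#   compare_pkg_lists("foo (>= 1.0) bar", "foo (>= 1.1) bar")
+#   # -> ({'bar': []}, {'bar': []})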
+
+
+def compare_dict_blobs(path, ablob, bblob, report_all, report_ver):
+    adict = blob_to_dict(ablob)
+    bdict = blob_to_dict(bblob)
+
+    pkgname = os.path.basename(path)
+
+    defaultvals = {}
+    defaultvals['PKG'] = pkgname
+    defaultvals['PKGE'] = '0'
+
+    changes = []
+    keys = list(set(adict.keys()) | set(bdict.keys()) | set(defaultval_map.keys()))
+    for key in keys:
+        astr = adict.get(key, '')
+        bstr = bdict.get(key, '')
+        if key in ver_monitor_fields:
+            monitored = report_ver or astr or bstr
+        else:
+            monitored = key in monitor_fields
+        mapped_key = defaultval_map.get(key, '')
+        if mapped_key:
+            if not astr:
+                astr = '%s [default]' % adict.get(mapped_key, defaultvals.get(key, ''))
+            if not bstr:
+                bstr = '%s [default]' % bdict.get(mapped_key, defaultvals.get(key, ''))
+
+        if astr != bstr:
+            if (not report_all) and key in numeric_fields:
+                aval = int(astr or 0)
+                bval = int(bstr or 0)
+                if aval != 0:
+                    percentchg = ((bval - aval) / float(aval)) * 100
+                else:
+                    percentchg = 100
+                if abs(percentchg) < monitor_numeric_threshold:
+                    continue
+            elif (not report_all) and key in list_fields:
+                if key == "FILELIST" and path.endswith("-dbg") and bstr.strip() != '':
+                    continue
+                if key in ['RPROVIDES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RREPLACES', 'RCONFLICTS']:
+                    (depvera, depverb) = compare_pkg_lists(astr, bstr)
+                    if depvera == depverb:
+                        continue
+                alist = astr.split()
+                alist.sort()
+                blist = bstr.split()
+                blist.sort()
+                # We don't care about the removal of self-dependencies
+                if pkgname in alist and not pkgname in blist:
+                    alist.remove(pkgname)
+                if ' '.join(alist) == ' '.join(blist):
+                    continue
+
+            chg = ChangeRecord(path, key, astr, bstr, monitored)
+            changes.append(chg)
+    return changes
+
+
+def process_changes(repopath, revision1, revision2='HEAD', report_all=False, report_ver=False):
+    repo = git.Repo(repopath)
+    assert repo.bare == False
+    commit = repo.commit(revision1)
+    diff = commit.diff(revision2)
+
+    changes = []
+    for d in diff.iter_change_type('M'):
+        path = os.path.dirname(d.a_blob.path)
+        if path.startswith('packages/'):
+            filename = os.path.basename(d.a_blob.path)
+            if filename == 'latest':
+                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+            elif filename.startswith('latest.'):
+                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+                changes.append(chg)
+        elif path.startswith('images/'):
+            filename = os.path.basename(d.a_blob.path)
+            if filename in img_monitor_files:
+                if filename == 'files-in-image.txt':
+                    alines = d.a_blob.data_stream.read().splitlines()
+                    blines = d.b_blob.data_stream.read().splitlines()
+                    filechanges = compare_file_lists(alines,blines)
+                    if filechanges:
+                        chg = ChangeRecord(path, filename, None, None, True)
+                        chg.filechanges = filechanges
+                        changes.append(chg)
+                elif filename == 'installed-package-names.txt':
+                    alines = d.a_blob.data_stream.read().splitlines()
+                    blines = d.b_blob.data_stream.read().splitlines()
+                    filechanges = compare_lists(alines,blines)
+                    if filechanges:
+                        chg = ChangeRecord(path, filename, None, None, True)
+                        chg.filechanges = filechanges
+                        changes.append(chg)
+                else:
+                    chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+                    changes.append(chg)
+            elif filename == 'image-info.txt':
+                changes.extend(compare_dict_blobs(path, d.a_blob, d.b_blob, report_all, report_ver))
+            elif '/image-files/' in path:
+                chg = ChangeRecord(path, filename, d.a_blob.data_stream.read(), d.b_blob.data_stream.read(), True)
+                changes.append(chg)
+
+    # Look for added preinst/postinst/prerm/postrm
+    # (without reporting newly added recipes)
+    addedpkgs = []
+    addedchanges = []
+    for d in diff.iter_change_type('A'):
+        path = os.path.dirname(d.b_blob.path)
+        if path.startswith('packages/'):
+            filename = os.path.basename(d.b_blob.path)
+            if filename == 'latest':
+                addedpkgs.append(path)
+            elif filename.startswith('latest.'):
+                chg = ChangeRecord(path, filename[7:], '', d.b_blob.data_stream.read(), True)
+                addedchanges.append(chg)
+    for chg in addedchanges:
+        found = False
+        for pkg in addedpkgs:
+            if chg.path.startswith(pkg):
+                found = True
+                break
+        if not found:
+            changes.append(chg)
+
+    # Look for cleared preinst/postinst/prerm/postrm
+    for d in diff.iter_change_type('D'):
+        path = os.path.dirname(d.a_blob.path)
+        if path.startswith('packages/'):
+            filename = os.path.basename(d.a_blob.path)
+            if filename != 'latest' and filename.startswith('latest.'):
+                chg = ChangeRecord(path, filename[7:], d.a_blob.data_stream.read(), '', True)
+                changes.append(chg)
+
+    # Link related changes
+    for chg in changes:
+        if chg.monitored:
+            for chg2 in changes:
+                # (Check dirname in the case of fields from recipe info files)
+                if chg.path == chg2.path or os.path.dirname(chg.path) == chg2.path:
+                    if chg2.fieldname in related_fields.get(chg.fieldname, []):
+                        chg.related.append(chg2)
+                    elif chg.path == chg2.path and chg.path.startswith('packages/') and chg2.fieldname in ['PE', 'PV', 'PR']:
+                        chg.related.append(chg2)
+
+    if report_all:
+        return changes
+    else:
+        return [chg for chg in changes if chg.monitored]
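+
+# Minimal usage sketch (illustrative; the repository path and revisions are
+# placeholders -- see scripts/buildhistory-diff for the real front-end):
+#
+#   changes = process_changes('/path/to/buildhistory', 'HEAD~1', 'HEAD')
+#   for chg in changes:
+#       print str(chg)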
diff --git a/meta/lib/oe/cachedpath.py b/meta/lib/oe/cachedpath.py
new file mode 100644
index 0000000..0840cc4
--- /dev/null
+++ b/meta/lib/oe/cachedpath.py
@@ -0,0 +1,233 @@
+#
+# Based on standard Python library functions, but avoids repeated stat
+# calls. It's assumed the files will not change from under us, so we can
+# cache stat calls.
+#
+
+import os
+import errno
+import stat as statmod
+
+class CachedPath(object):
+    def __init__(self):
+        self.statcache = {}
+        self.lstatcache = {}
+        self.normpathcache = {}
+        return
+
+    def updatecache(self, x):
+        x = self.normpath(x)
+        if x in self.statcache:
+            del self.statcache[x]
+        if x in self.lstatcache:
+            del self.lstatcache[x]
+
+    def normpath(self, path):
+        if path in self.normpathcache:
+            return self.normpathcache[path]
+        newpath = os.path.normpath(path)
+        self.normpathcache[path] = newpath
+        return newpath
+
+    def _callstat(self, path):
+        if path in self.statcache:
+            return self.statcache[path]
+        try:
+            st = os.stat(path)
+            self.statcache[path] = st
+            return st
+        except os.error:
+            self.statcache[path] = False
+            return False
+
+    # We might as well call lstat and then only call stat as well in the
+    # symbolic link case, since this turns out to be much more efficient
+    # in real-world usage of this cache.
+    def callstat(self, path):
+        path = self.normpath(path)
+        self.calllstat(path)
+        return self.statcache[path]
+
+    def calllstat(self, path):
+        path = self.normpath(path)
+        if path in self.lstatcache:
+            return self.lstatcache[path]
+        #bb.error("LStatpath:" + path)
+        try:
+            lst = os.lstat(path)
+            self.lstatcache[path] = lst
+            if not statmod.S_ISLNK(lst.st_mode):
+                self.statcache[path] = lst
+            else:
+                self._callstat(path)
+            return lst
+        except (os.error, AttributeError):
+            self.lstatcache[path] = False
+            self.statcache[path] = False
+            return False
+
+    # This follows symbolic links, so both islink() and isdir() can be true
+    # for the same path on systems that support symlinks
+    def isfile(self, path):
+        """Test whether a path is a regular file"""
+        st = self.callstat(path)
+        if not st:
+            return False
+        return statmod.S_ISREG(st.st_mode)
+
+    # Is a path a directory?
+    # This follows symbolic links, so both islink() and isdir()
+    # can be true for the same path on systems that support symlinks
+    def isdir(self, s):
+        """Return true if the pathname refers to an existing directory."""
+        st = self.callstat(s)
+        if not st:
+            return False
+        return statmod.S_ISDIR(st.st_mode)
+
+    def islink(self, path):
+        """Test whether a path is a symbolic link"""
+        st = self.calllstat(path)
+        if not st:
+            return False
+        return statmod.S_ISLNK(st.st_mode)
+
+    # Does a path exist?
+    # This is false for dangling symbolic links on systems that support them.
+    def exists(self, path):
+        """Test whether a path exists.  Returns False for broken symbolic links"""
+        if self.callstat(path):
+            return True
+        return False
+
+    def lexists(self, path):
+        """Test whether a path exists.  Returns True for broken symbolic links"""
+        if self.calllstat(path):
+            return True
+        return False
+
+    def stat(self, path):
+        return self.callstat(path)
+
+    def lstat(self, path):
+        return self.calllstat(path)
+
+    def walk(self, top, topdown=True, onerror=None, followlinks=False):
+        # Matches os.walk, not os.path.walk()
+
+        # We may not have read permission for top, in which case we can't
+        # get a list of the files the directory contains.  os.path.walk
+        # always suppressed the exception then, rather than blow up for a
+        # minor reason when (say) a thousand readable directories are still
+        # left to visit.  That logic is copied here.
+        try:
+            names = os.listdir(top)
+        except os.error as err:
+            if onerror is not None:
+                onerror(err)
+            return
+
+        dirs, nondirs = [], []
+        for name in names:
+            if self.isdir(os.path.join(top, name)):
+                dirs.append(name)
+            else:
+                nondirs.append(name)
+
+        if topdown:
+            yield top, dirs, nondirs
+        for name in dirs:
+            new_path = os.path.join(top, name)
+            if followlinks or not self.islink(new_path):
+                for x in self.walk(new_path, topdown, onerror, followlinks):
+                    yield x
+        if not topdown:
+            yield top, dirs, nondirs
+
+    ## realpath() related functions
+    def __is_path_below(self, file, root):
+        return (file + os.path.sep).startswith(root)
+
+    def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir):
+        """Calculates real path of symlink 'start' + 'rel_path' below
+        'root'; no part of 'start' below 'root' must contain symlinks. """
+        have_dir = True
+
+        for d in rel_path.split(os.path.sep):
+            if not have_dir and not assume_dir:
+                raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+            if d == os.path.pardir: # '..'
+                if len(start) >= len(root):
+                    # do not follow '..' before root
+                    start = os.path.dirname(start)
+                else:
+                    # emit warning?
+                    pass
+            else:
+                (start, have_dir) = self.__realpath(os.path.join(start, d),
+                                                    root, loop_cnt, assume_dir)
+
+            assert(self.__is_path_below(start, root))
+
+        return start
+
+    def __realpath(self, file, root, loop_cnt, assume_dir):
+        while self.islink(file) and len(file) >= len(root):
+            if loop_cnt == 0:
+                raise OSError(errno.ELOOP, file)
+
+            loop_cnt -= 1
+            target = os.path.normpath(os.readlink(file))
+    
+            if not os.path.isabs(target):
+                tdir = os.path.dirname(file)
+                assert(self.__is_path_below(tdir, root))
+            else:
+                tdir = root
+
+            file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+        try:
+            is_dir = self.isdir(file)
+        except:
+            is_dir = False
+
+        return (file, is_dir)
+
+    def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+        """ Returns the canonical path of 'file' with assuming a
+        toplevel 'root' directory. When 'use_physdir' is set, all
+        preceding path components of 'file' will be resolved first;
+        this flag should be set unless it is guaranteed that there is
+        no symlink in the path. When 'assume_dir' is not set, missing
+        path components will raise an ENOENT error"""
+
+        root = os.path.normpath(root)
+        file = os.path.normpath(file)
+
+        if not root.endswith(os.path.sep):
+            # letting root end with '/' makes some things easier
+            root = root + os.path.sep
+
+        if not self.__is_path_below(file, root):
+            raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+        try:
+            if use_physdir:
+                file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+            else:
+                file = self.__realpath(file, root, loop_cnt, assume_dir)[0]
+        except OSError as e:
+            if e.errno == errno.ELOOP:
+                # make ELOOP more readable; without catching it here, a
+                # backtrace with hundreds of OSError exceptions would be
+                # printed
+                raise OSError(errno.ELOOP,
+                              "too many recursions while resolving '%s'; loop in '%s'" %
+                              (file, e.strerror))
+
+            raise
+
+        return file
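+
+# Minimal usage sketch (illustrative): repeated queries against the same
+# path are answered from the stat caches rather than hitting the filesystem
+# again.
+#
+#   cpath = CachedPath()
+#   if cpath.isdir('/tmp'):        # performs the underlying lstat/stat
+#       print cpath.exists('/tmp') # served from the cache
+#   cpath.updatecache('/tmp')      # invalidate if the path has changed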
diff --git a/meta/lib/oe/classextend.py b/meta/lib/oe/classextend.py
new file mode 100644
index 0000000..5107ecd
--- /dev/null
+++ b/meta/lib/oe/classextend.py
@@ -0,0 +1,120 @@
+class ClassExtender(object):
+    def __init__(self, extname, d):
+        self.extname = extname
+        self.d = d
+        self.pkgs_mapping = []
+
+    def extend_name(self, name):
+        if name.startswith("kernel-") or name == "virtual/kernel":
+            return name
+        if name.startswith("rtld"):
+            return name
+        if name.endswith("-crosssdk"):
+            return name
+        if name.endswith("-" + self.extname):
+            name = name.replace("-" + self.extname, "")
+        if name.startswith("virtual/"):
+            subs = name.split("/", 1)[1]
+            if not subs.startswith(self.extname):
+                return "virtual/" + self.extname + "-" + subs
+            return name
+        if not name.startswith(self.extname):
+            return self.extname + "-" + name
+        return name
+
+    def map_variable(self, varname, setvar = True):
+        var = self.d.getVar(varname, True)
+        if not var:
+            return ""
+        var = var.split()
+        newvar = []
+        for v in var:
+            newvar.append(self.extend_name(v))
+        newdata =  " ".join(newvar)
+        if setvar:
+            self.d.setVar(varname, newdata)
+        return newdata
+
+    def map_regexp_variable(self, varname, setvar = True):
+        var = self.d.getVar(varname, True)
+        if not var:
+            return ""
+        var = var.split()
+        newvar = []
+        for v in var:
+            if v.startswith("^" + self.extname):
+                newvar.append(v)
+            elif v.startswith("^"):
+                newvar.append("^" + self.extname + "-" + v[1:])
+            else:
+                newvar.append(self.extend_name(v))
+        newdata =  " ".join(newvar)
+        if setvar:
+            self.d.setVar(varname, newdata)
+        return newdata
+
+    def map_depends(self, dep):
+        if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
+            return dep
+        else:
+            # Do not extend names that already have a multilib prefix
+            var = self.d.getVar("MULTILIB_VARIANTS", True)
+            if var:
+                var = var.split()
+                for v in var:
+                    if dep.startswith(v):
+                        return dep
+            return self.extend_name(dep)
+
+    def map_depends_variable(self, varname, suffix = ""):
+        # We need to preserve EXTENDPKGV so it can be expanded correctly later
+        if suffix:
+            varname = varname + "_" + suffix
+        orig = self.d.getVar("EXTENDPKGV", False)
+        self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
+        deps = self.d.getVar(varname, True)
+        if not deps:
+            self.d.setVar("EXTENDPKGV", orig)
+            return
+        deps = bb.utils.explode_dep_versions2(deps)
+        newdeps = {}
+        for dep in deps:
+            newdeps[self.map_depends(dep)] = deps[dep]
+
+        self.d.setVar(varname, bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}"))
+        self.d.setVar("EXTENDPKGV", orig)
+
+    def map_packagevars(self):
+        for pkg in (self.d.getVar("PACKAGES", True).split() + [""]):
+            self.map_depends_variable("RDEPENDS", pkg)
+            self.map_depends_variable("RRECOMMENDS", pkg)
+            self.map_depends_variable("RSUGGESTS", pkg)
+            self.map_depends_variable("RPROVIDES", pkg)
+            self.map_depends_variable("RREPLACES", pkg)
+            self.map_depends_variable("RCONFLICTS", pkg)
+            self.map_depends_variable("PKG", pkg)
+
+    def rename_packages(self):
+        for pkg in (self.d.getVar("PACKAGES", True) or "").split():
+            if pkg.startswith(self.extname):
+                self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
+                continue
+            self.pkgs_mapping.append([pkg, self.extend_name(pkg)])
+
+        self.d.setVar("PACKAGES", " ".join([row[1] for row in self.pkgs_mapping]))
+
+    def rename_package_variables(self, variables):
+        for pkg_mapping in self.pkgs_mapping:
+            for subs in variables:
+                self.d.renameVar("%s_%s" % (subs, pkg_mapping[0]), "%s_%s" % (subs, pkg_mapping[1]))
+
+class NativesdkClassExtender(ClassExtender):
+    def map_depends(self, dep):
+        if dep.startswith(self.extname):
+            return dep
+        if dep.endswith(("-gcc-initial", "-gcc", "-g++")):
+            return dep + "-crosssdk"
+        elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
+            return dep
+        else:
+            return self.extend_name(dep)
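+
+# Illustrative name mappings (derived from the rules in extend_name above),
+# for a ClassExtender created with extname "lib32":
+#
+#   "zlib"              -> "lib32-zlib"
+#   "virtual/libc"      -> "virtual/lib32-libc"
+#   "kernel-module-foo" -> "kernel-module-foo"   (kernel names pass through)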
diff --git a/meta/lib/oe/classutils.py b/meta/lib/oe/classutils.py
new file mode 100644
index 0000000..58188fd
--- /dev/null
+++ b/meta/lib/oe/classutils.py
@@ -0,0 +1,43 @@
+class ClassRegistry(type):
+    """Maintain a registry of classes, indexed by name.
+
+Note that this implementation requires that the names be unique, as it uses
+a dictionary to hold the classes by name.
+
+The name in the registry can be overridden via the 'name' attribute of the
+class, and the 'priority' attribute controls priority. The prioritized()
+method returns the registered classes in priority order.
+
+Subclasses of ClassRegistry may define an 'implemented' property to exert
+control over whether the class will be added to the registry (e.g. to keep
+abstract base classes out of the registry)."""
+    priority = 0
+    class __metaclass__(type):
+        """Give each ClassRegistry their own registry"""
+        def __init__(cls, name, bases, attrs):
+            cls.registry = {}
+            type.__init__(cls, name, bases, attrs)
+
+    def __init__(cls, name, bases, attrs):
+        super(ClassRegistry, cls).__init__(name, bases, attrs)
+        try:
+            if not cls.implemented:
+                return
+        except AttributeError:
+            pass
+
+        try:
+            cls.name
+        except AttributeError:
+            cls.name = name
+        cls.registry[cls.name] = cls
+
+    @classmethod
+    def prioritized(tcls):
+        return sorted(tcls.registry.values(),
+                      key=lambda v: v.priority, reverse=True)
+
+    def unregister(cls):
+        for key in cls.registry.keys():
+            if cls.registry[key] is cls:
+                del cls.registry[key]
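+
+# Minimal usage sketch (the class names below are purely illustrative):
+#
+#   class Fetcher(object):
+#       __metaclass__ = ClassRegistry
+#
+#   class GitFetcher(Fetcher):
+#       name = "git"
+#       priority = 10
+#
+#   # Fetcher.registry is {"Fetcher": Fetcher, "git": GitFetcher} and
+#   # Fetcher.prioritized() returns [GitFetcher, Fetcher].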
diff --git a/meta/lib/oe/copy_buildsystem.py b/meta/lib/oe/copy_buildsystem.py
new file mode 100644
index 0000000..979578c
--- /dev/null
+++ b/meta/lib/oe/copy_buildsystem.py
@@ -0,0 +1,101 @@
+# This class should provide easy access to the different aspects of the
+# buildsystem such as layers, bitbake location, etc.
+import os
+import stat
+import shutil
+
+def _smart_copy(src, dest):
+    # smart_copy will choose the correct function depending on whether the
+    # source is a file or a directory.
+    mode = os.stat(src).st_mode
+    if stat.S_ISDIR(mode):
+        shutil.copytree(src, dest, symlinks=True)
+    else:
+        shutil.copyfile(src, dest)
+        shutil.copymode(src, dest)
+
+class BuildSystem(object):
+    def __init__(self, d):
+        self.d = d
+        self.layerdirs = d.getVar('BBLAYERS', True).split()
+
+    def copy_bitbake_and_layers(self, destdir):
+        # Copy in all metadata layers + bitbake (as repositories)
+        layers_copied = []
+        bb.utils.mkdirhier(destdir)
+        layers = list(self.layerdirs)
+
+        corebase = self.d.getVar('COREBASE', True)
+        layers.append(corebase)
+
+        corebase_files = self.d.getVar('COREBASE_FILES', True).split()
+        corebase_files = [corebase + '/' + x for x in corebase_files]
+        # Make sure bitbake goes in
+        bitbake_dir = bb.__file__.rsplit('/', 3)[0]
+        corebase_files.append(bitbake_dir)
+
+        for layer in layers:
+            layerconf = os.path.join(layer, 'conf', 'layer.conf')
+            if os.path.exists(layerconf):
+                with open(layerconf, 'r') as f:
+                    if f.readline().startswith("# ### workspace layer auto-generated by devtool ###"):
+                        bb.warn("Skipping local workspace layer %s" % layer)
+                        continue
+
+            # If the layer was already under corebase, leave it there
+            # since layers such as meta have issues when moved.
+            layerdestpath = destdir
+            if corebase == os.path.dirname(layer):
+                layerdestpath += '/' + os.path.basename(corebase)
+            layerdestpath += '/' + os.path.basename(layer)
+
+            layer_relative = os.path.relpath(layerdestpath,
+                                             destdir)
+            layers_copied.append(layer_relative)
+
+            # Treat corebase as special since it typically will contain
+            # build directories or other custom items.
+            if corebase == layer:
+                bb.utils.mkdirhier(layerdestpath)
+                for f in corebase_files:
+                    f_basename = os.path.basename(f)
+                    destname = os.path.join(layerdestpath, f_basename)
+                    _smart_copy(f, destname)
+            else:
+                if os.path.exists(layerdestpath):
+                    bb.note("Skipping layer %s, already handled" % layer)
+                else:
+                    _smart_copy(layer, layerdestpath)
+
+        return layers_copied
+
+def generate_locked_sigs(sigfile, d):
+    bb.utils.mkdirhier(os.path.dirname(sigfile))
+    depd = d.getVar('BB_TASKDEPDATA', True)
+    tasks = ['%s.%s' % (v[2], v[1]) for v in depd.itervalues()]
+    bb.parse.siggen.dump_lockedsigs(sigfile, tasks)
+
+def prune_lockedsigs(allowed_tasks, excluded_targets, lockedsigs, pruned_output):
+    with open(lockedsigs, 'r') as infile:
+        bb.utils.mkdirhier(os.path.dirname(pruned_output))
+        with open(pruned_output, 'w') as f:
+            invalue = False
+            for line in infile:
+                if invalue:
+                    if line.endswith('\\\n'):
+                        splitval = line.strip().split(':')
+                        if splitval[1] in allowed_tasks and not splitval[0] in excluded_targets:
+                            f.write(line)
+                    else:
+                        f.write(line)
+                        invalue = False
+                elif line.startswith('SIGGEN_LOCKEDSIGS'):
+                    invalue = True
+                    f.write(line)
+
+def create_locked_sstate_cache(lockedsigs, input_sstate_cache, output_sstate_cache, d, fixedlsbstring=""):
+    bb.note('Generating sstate-cache...')
+
+    bb.process.run("gen-lockedsig-cache %s %s %s" % (lockedsigs, input_sstate_cache, output_sstate_cache))
+    if fixedlsbstring:
+        os.rename(output_sstate_cache + '/' + d.getVar('NATIVELSBSTRING', True),
+                  output_sstate_cache + '/' + fixedlsbstring)
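+
+# Minimal usage sketch (destination paths below are illustrative only):
+#
+#   buildsystem = BuildSystem(d)
+#   layers = buildsystem.copy_bitbake_and_layers(destdir + '/layers')
+#   generate_locked_sigs(destdir + '/locked-sigs.inc', d)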
diff --git a/meta/lib/oe/data.py b/meta/lib/oe/data.py
new file mode 100644
index 0000000..4cc0e02
--- /dev/null
+++ b/meta/lib/oe/data.py
@@ -0,0 +1,17 @@
+import oe.maketype
+
+def typed_value(key, d):
+    """Construct a value for the specified metadata variable, using its flags
+    to determine the type and parameters for construction."""
+    var_type = d.getVarFlag(key, 'type')
+    flags = d.getVarFlags(key)
+    if flags is not None:
+        flags = dict((flag, d.expand(value))
+                     for flag, value in flags.iteritems())
+    else:
+        flags = {}
+
+    try:
+        return oe.maketype.create(d.getVar(key, True) or '', var_type, **flags)
+    except (TypeError, ValueError), exc:
+        bb.msg.fatal("Data", "%s: %s" % (key, str(exc)))
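+
+# Illustrative example: given a variable declared with a 'type' flag, e.g.
+#
+#   FOO = "1"
+#   FOO[type] = "boolean"
+#
+# typed_value('FOO', d) is expected to return True rather than the raw
+# string "1" (assuming oe.maketype knows the "boolean" type).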
diff --git a/meta/lib/oe/distro_check.py b/meta/lib/oe/distro_check.py
new file mode 100644
index 0000000..8ed5b0e
--- /dev/null
+++ b/meta/lib/oe/distro_check.py
@@ -0,0 +1,383 @@
+def get_links_from_url(url):
+    "Return all the href links found on the web location"
+
+    import urllib, sgmllib
+    
+    class LinksParser(sgmllib.SGMLParser):
+        def parse(self, s):
+            "Parse the given string 's'."
+            self.feed(s)
+            self.close()
+    
+        def __init__(self, verbose=0):
+            "Initialise an object passing 'verbose' to the superclass."
+            sgmllib.SGMLParser.__init__(self, verbose)
+            self.hyperlinks = []
+    
+        def start_a(self, attributes):
+            "Process a hyperlink and its 'attributes'."
+            for name, value in attributes:
+                if name == "href":
+                    self.hyperlinks.append(value.strip('/'))
+    
+        def get_hyperlinks(self):
+            "Return the list of hyperlinks."
+            return self.hyperlinks
+
+    sock = urllib.urlopen(url)
+    webpage = sock.read()
+    sock.close()
+
+    linksparser = LinksParser()
+    linksparser.parse(webpage)
+    return linksparser.get_hyperlinks()
+
+def find_latest_numeric_release(url):
+    "Find the latest listed numeric release on the given url"
+    max=0
+    maxstr=""
+    for link in get_links_from_url(url):
+        try:
+            release = float(link)
+        except:
+            release = 0
+        if release > max:
+            max = release
+            maxstr = link
+    return maxstr
+
+def is_src_rpm(name):
+    "Check if the link is pointing to a src.rpm file"
+    if name[-8:] == ".src.rpm":
+        return True
+    else:
+        return False
+
+def package_name_from_srpm(srpm):
+    "Strip out the package name from the src.rpm filename"
+    strings = srpm.split('-')
+    package_name = strings[0]
+    for i in range(1, len (strings) - 1):
+        str = strings[i]
+        if not str[0].isdigit():
+            package_name += '-' + str
+    return package_name
+
+def clean_package_list(package_list):
+    "Removes multiple entries of packages and sorts the list"
+    set = {}
+    map(set.__setitem__, package_list, [])
+    return set.keys()
+
+
+def get_latest_released_meego_source_package_list():
+    "Returns list of all the name os packages in the latest meego distro"
+
+    package_names = []
+    try:
+        f = open("/tmp/Meego-1.1", "r")
+        for line in f:
+            package_names.append(line[:-1] + ":" + "main") # Also strip the '\n' at the end
+    except IOError: pass
+    package_list=clean_package_list(package_names)
+    return "1.0", package_list
+
+def get_source_package_list_from_url(url, section):
+    "Return a sectioned list of package names from a URL list"
+
+    bb.note("Reading %s: %s" % (url, section))
+    links = get_links_from_url(url)
+    srpms = filter(is_src_rpm, links)
+    names_list = map(package_name_from_srpm, srpms)
+
+    new_pkgs = []
+    for pkgs in names_list:
+        new_pkgs.append(pkgs + ":" + section)
+
+    return new_pkgs
+
+def get_latest_released_fedora_source_package_list():
+    "Returns list of all the name os packages in the latest fedora distro"
+    latest = find_latest_numeric_release("http://archive.fedoraproject.org/pub/fedora/linux/releases/")
+
+    package_names = get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/releases/%s/Fedora/source/SRPMS/" % latest, "main")
+
+#    package_names += get_source_package_list_from_url("http://download.fedora.redhat.com/pub/fedora/linux/releases/%s/Everything/source/SPRMS/" % latest, "everything")
+    package_names += get_source_package_list_from_url("http://archive.fedoraproject.org/pub/fedora/linux/updates/%s/SRPMS/" % latest, "updates")
+
+    package_list=clean_package_list(package_names)
+        
+    return latest, package_list
+
+def get_latest_released_opensuse_source_package_list():
+    "Returns list of all the name os packages in the latest opensuse distro"
+    latest = find_latest_numeric_release("http://download.opensuse.org/source/distribution/")
+
+    package_names = get_source_package_list_from_url("http://download.opensuse.org/source/distribution/%s/repo/oss/suse/src/" % latest, "main")
+    package_names += get_source_package_list_from_url("http://download.opensuse.org/update/%s/rpm/src/" % latest, "updates")
+
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def get_latest_released_mandriva_source_package_list():
+    "Returns list of all the name os packages in the latest mandriva distro"
+    latest = find_latest_numeric_release("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/")
+    package_names = get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/release/" % latest, "main")
+#    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/contrib/release/" % latest, "contrib")
+    package_names += get_source_package_list_from_url("http://distrib-coffee.ipsl.jussieu.fr/pub/linux/MandrivaLinux/official/%s/SRPMS/main/updates/" % latest, "updates")
+
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def find_latest_debian_release(url):
+    "Find the latest listed debian release on the given url"
+
+    releases = []
+    for link in get_links_from_url(url):
+        if link[:6] == "Debian":
+            if ';' not in link:
+                releases.append(link)
+    releases.sort()
+    try:
+        return releases.pop()[6:]
+    except:
+        return "_NotFound_"
+
+def get_debian_style_source_package_list(url, section):
+    "Return the list of package-names stored in the debian style Sources.gz file"
+    import urllib
+    sock = urllib.urlopen(url)
+    import tempfile
+    tmpfile = tempfile.NamedTemporaryFile(mode='wb', prefix='oecore.', suffix='.tmp', delete=False)
+    tmpfilename=tmpfile.name
+    tmpfile.write(sock.read())
+    sock.close()
+    tmpfile.close()
+    import gzip
+    bb.note("Reading %s: %s" % (url, section))
+
+    f = gzip.open(tmpfilename)
+    package_names = []
+    for line in f:
+        if line[:9] == "Package: ":
+            package_names.append(line[9:-1] + ":" + section) # Also strip the '\n' at the end
+    os.unlink(tmpfilename)
+
+    return package_names
+
+def get_latest_released_debian_source_package_list():
+    "Returns list of all the name os packages in the latest debian distro"
+    latest = find_latest_debian_release("http://ftp.debian.org/debian/dists/")
+    url = "http://ftp.debian.org/debian/dists/stable/main/source/Sources.gz" 
+    package_names = get_debian_style_source_package_list(url, "main")
+#    url = "http://ftp.debian.org/debian/dists/stable/contrib/source/Sources.gz" 
+#    package_names += get_debian_style_source_package_list(url, "contrib")
+    url = "http://ftp.debian.org/debian/dists/stable-proposed-updates/main/source/Sources.gz" 
+    package_names += get_debian_style_source_package_list(url, "updates")
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def find_latest_ubuntu_release(url):
+    "Find the latest listed ubuntu release on the given url"
+    url += "?C=M;O=D" # Descending Sort by Last Modified
+    for link in get_links_from_url(url):
+        if link[-8:] == "-updates":
+            return link[:-8]
+    return "_NotFound_"
+
+def get_latest_released_ubuntu_source_package_list():
+    "Returns list of all the name os packages in the latest ubuntu distro"
+    latest = find_latest_ubuntu_release("http://archive.ubuntu.com/ubuntu/dists/")
+    url = "http://archive.ubuntu.com/ubuntu/dists/%s/main/source/Sources.gz" % latest
+    package_names = get_debian_style_source_package_list(url, "main")
+#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/multiverse/source/Sources.gz" % latest
+#    package_names += get_debian_style_source_package_list(url, "multiverse")
+#    url = "http://archive.ubuntu.com/ubuntu/dists/%s/universe/source/Sources.gz" % latest
+#    package_names += get_debian_style_source_package_list(url, "universe")
+    url = "http://archive.ubuntu.com/ubuntu/dists/%s-updates/main/source/Sources.gz" % latest
+    package_names += get_debian_style_source_package_list(url, "updates")
+    package_list=clean_package_list(package_names)
+    return latest, package_list
+
+def create_distro_packages_list(distro_check_dir):
+    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
+    if not os.path.isdir (pkglst_dir):
+        os.makedirs(pkglst_dir)
+    # first clear old stuff
+    for file in os.listdir(pkglst_dir):
+        os.unlink(os.path.join(pkglst_dir, file))
+ 
+    per_distro_functions = [
+                            ["Debian", get_latest_released_debian_source_package_list],
+                            ["Ubuntu", get_latest_released_ubuntu_source_package_list],
+                            ["Fedora", get_latest_released_fedora_source_package_list],
+                            ["OpenSuSE", get_latest_released_opensuse_source_package_list],
+                            ["Mandriva", get_latest_released_mandriva_source_package_list],
+                            ["Meego", get_latest_released_meego_source_package_list]
+                           ]
+ 
+    from datetime import datetime
+    begin = datetime.now()
+    for distro in per_distro_functions:
+        name = distro[0]
+        release, package_list = distro[1]()
+        bb.note("Distro: %s, Latest Release: %s, # src packages: %d" % (name, release, len(package_list)))
+        package_list_file = os.path.join(pkglst_dir, name + "-" + release)
+        f = open(package_list_file, "w+b")
+        for pkg in package_list:
+            f.write(pkg + "\n")
+        f.close()
+    end = datetime.now()
+    delta = end - begin
+    bb.note("package_list generatiosn took this much time: %d seconds" % delta.seconds)
+
+def update_distro_data(distro_check_dir, datetime):
+    """
+        If distro packages list data is old then rebuild it.
+        The operations has to be protected by a lock so that
+        only one thread performes it at a time.
+    """
+    if not os.path.isdir (distro_check_dir):
+        try:
+            bb.note ("Making new directory: %s" % distro_check_dir)
+            os.makedirs (distro_check_dir)
+        except OSError:
+            raise Exception('Unable to create directory %s' % (distro_check_dir))
+
+
+    datetime_file = os.path.join(distro_check_dir, "build_datetime")
+    saved_datetime = "_invalid_"
+    import fcntl
+    try:
+        if not os.path.exists(datetime_file):
+            open(datetime_file, 'w+b').close() # touch the file so that the next open won't fail
+
+        f = open(datetime_file, "r+b")
+        fcntl.lockf(f, fcntl.LOCK_EX)
+        saved_datetime = f.read()
+        if saved_datetime[0:8] != datetime[0:8]:
+            bb.note("The build datetime did not match: saved:%s current:%s" % (saved_datetime, datetime))
+            bb.note("Regenerating distro package lists")
+            create_distro_packages_list(distro_check_dir)
+            f.seek(0)
+            f.write(datetime)
+
+    except OSError:
+        raise Exception('Unable to read/write this file: %s' % (datetime_file))
+    finally:
+        fcntl.lockf(f, fcntl.LOCK_UN)
+        f.close()
+ 
+def compare_in_distro_packages_list(distro_check_dir, d):
+    if not os.path.isdir(distro_check_dir):
+        raise Exception("compare_in_distro_packages_list: invalid distro_check_dir passed")
+        
+    localdata = bb.data.createCopy(d)
+    pkglst_dir = os.path.join(distro_check_dir, "package_lists")
+    matching_distros = []
+    pn = d.getVar('PN', True)
+    recipe_name = d.getVar('PN', True)
+    bb.note("Checking: %s" % pn)
+
+    trim_dict = dict({"-native":"-native", "-cross":"-cross", "-initial":"-initial"})
+
+    if pn.find("-native") != -1:
+        pnstripped = pn.split("-native")
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[0]
+
+    if pn.startswith("nativesdk-"):
+        pnstripped = pn.split("nativesdk-")
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[1] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[1]
+
+    if pn.find("-cross") != -1:
+        pnstripped = pn.split("-cross")
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[0]
+
+    if pn.find("-initial") != -1:
+        pnstripped = pn.split("-initial")
+        localdata.setVar('OVERRIDES', "pn-" + pnstripped[0] + ":" + d.getVar('OVERRIDES', True))
+        bb.data.update_data(localdata)
+        recipe_name = pnstripped[0]
+
+    bb.note("Recipe: %s" % recipe_name)
+    tmp = localdata.getVar('DISTRO_PN_ALIAS', True)
+
+    distro_exceptions = dict({"OE-Core":'OE-Core', "OpenedHand":'OpenedHand', "Intel":'Intel', "Upstream":'Upstream', "Windriver":'Windriver', "OSPDT":'OSPDT Approved', "Poky":'poky'})
+
+    if tmp:
+        list = tmp.split(' ')
+        for str in list:
+            if str and str.find("=") == -1 and str in distro_exceptions:
+                matching_distros.append(str)
+
+    distro_pn_aliases = {}
+    if tmp:
+        list = tmp.split(' ')
+        for str in list:
+            if str.find("=") != -1:
+                (dist, pn_alias) = str.split('=')
+                distro_pn_aliases[dist.strip().lower()] = pn_alias.strip()
+ 
+    for file in os.listdir(pkglst_dir):
+        (distro, distro_release) = file.split("-")
+        f = open(os.path.join(pkglst_dir, file), "rb")
+        for line in f:
+            (pkg, section) = line.split(":")
+            if distro.lower() in distro_pn_aliases:
+                pn = distro_pn_aliases[distro.lower()]
+            else:
+                pn = recipe_name
+            if pn == pkg:
+                matching_distros.append(distro + "-" + section[:-1]) # strip the \n at the end
+                f.close()
+                break
+        f.close()
+
+
+    if tmp is not None:
+        list = tmp.split(' ')
+        for item in list:
+            matching_distros.append(item)
+    bb.note("Matching: %s" % matching_distros)
+    return matching_distros
+
+def create_log_file(d, logname):
+    import subprocess
+    logpath = d.getVar('LOG_DIR', True)
+    bb.utils.mkdirhier(logpath)
+    logfn, logsuffix = os.path.splitext(logname)
+    logfile = os.path.join(logpath, "%s.%s%s" % (logfn, d.getVar('DATETIME', True), logsuffix))
+    if not os.path.exists(logfile):
+        slogfile = os.path.join(logpath, logname)
+        if os.path.exists(slogfile):
+            os.remove(slogfile)
+        subprocess.call("touch %s" % logfile, shell=True)
+        os.symlink(logfile, slogfile)
+        d.setVar('LOG_FILE', logfile)
+    return logfile
+
+
+def save_distro_check_result(result, datetime, result_file, d):
+    pn = d.getVar('PN', True)
+    logdir = d.getVar('LOG_DIR', True)
+    if not logdir:
+        bb.error("LOG_DIR variable is not defined, can't write the distro_check results")
+        return
+    if not os.path.isdir(logdir):
+        os.makedirs(logdir)
+    line = pn
+    for i in result:
+        line = line + "," + i
+    f = open(result_file, "a")
+    import fcntl
+    fcntl.lockf(f, fcntl.LOCK_EX)
+    f.seek(0, os.SEEK_END) # seek to the end of file
+    f.write(line + "\n")
+    fcntl.lockf(f, fcntl.LOCK_UN)
+    f.close()
diff --git a/meta/lib/oe/image.py b/meta/lib/oe/image.py
new file mode 100644
index 0000000..2361955
--- /dev/null
+++ b/meta/lib/oe/image.py
@@ -0,0 +1,386 @@
+from oe.utils import execute_pre_post_process
+import os
+import subprocess
+import multiprocessing
+
+
+def generate_image(arg):
+    (type, subimages, create_img_cmd) = arg
+
+    bb.note("Running image creation script for %s: %s ..." %
+            (type, create_img_cmd))
+
+    try:
+        output = subprocess.check_output(create_img_cmd,
+                                         stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as e:
+        return("Error: The image creation script '%s' returned %d:\n%s" %
+               (e.cmd, e.returncode, e.output))
+
+    bb.note("Script output:\n%s" % output)
+
+    return None
+
+
+"""
+This class helps compute IMAGE_FSTYPES dependencies and group them in batches
+that can be executed in parallel.
+
+The following example is for illustration purposes only and is highly unlikely
+to occur in real life; it is just one of the test cases used to exercise the
+algorithm:
+
+For:
+IMAGE_FSTYPES = "i1 i2 i3 i4 i5"
+IMAGE_TYPEDEP_i4 = "i2"
+IMAGE_TYPEDEP_i5 = "i6 i4"
+IMAGE_TYPEDEP_i6 = "i7"
+IMAGE_TYPEDEP_i7 = "i2"
+
+We get the following list of batches that can be executed in parallel, having the
+dependencies satisfied:
+
+[['i1', 'i3', 'i2'], ['i4', 'i7'], ['i6'], ['i5']]
+"""
+class ImageDepGraph(object):
+    def __init__(self, d):
+        self.d = d
+        self.graph = dict()
+        self.deps_array = dict()
+
+    def _construct_dep_graph(self, image_fstypes):
+        graph = dict()
+
+        def add_node(node):
+            base_type = self._image_base_type(node)
+            deps = (self.d.getVar('IMAGE_TYPEDEP_' + node, True) or "")
+            base_deps = (self.d.getVar('IMAGE_TYPEDEP_' + base_type, True) or "")
+            if deps != "" or base_deps != "":
+                graph[node] = deps
+
+                for dep in deps.split() + base_deps.split():
+                    if not dep in graph:
+                        add_node(dep)
+            else:
+                graph[node] = ""
+
+        for fstype in image_fstypes:
+            add_node(fstype)
+
+        return graph
+
+    def _clean_graph(self):
+        # Live and VMDK/VDI images are processed via an inherited bbclass
+        # and do not get handled here, so remove them from the fstypes
+        # graph. Their dependencies have already been added, so nothing
+        # is lost.
+        remove_list = (self.d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+
+        for item in remove_list:
+            self.graph.pop(item, None)
+
+    def _image_base_type(self, type):
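+        # These VM/live image types are treated as being based on an ext4
+        # rootfs; map them to ext4 before stripping any compression suffix.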
+        ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
+        if type in ["vmdk", "vdi", "qcow2", "live", "iso", "hddimg"]:
+            type = "ext4"
+        basetype = type
+        for ctype in ctypes:
+            if type.endswith("." + ctype):
+                basetype = type[:-len("." + ctype)]
+                break
+
+        return basetype
+
+    def _compute_dependencies(self):
+        """
+        returns dict object of nodes with [no_of_depends_on, no_of_depended_by]
+        for each node
+        """
+        deps_array = dict()
+        for node in self.graph:
+            deps_array[node] = [0, 0]
+
+        for node in self.graph:
+            deps = self.graph[node].split()
+            deps_array[node][0] += len(deps)
+            for dep in deps:
+                deps_array[dep][1] += 1
+
+        return deps_array
+
+    def _sort_graph(self):
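+        # Kahn-style topological grouping: collect every node with no
+        # unsatisfied dependencies into one batch, remove the batch from
+        # the graph and recurse until nothing is left.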
+        sorted_list = []
+        group = []
+        for node in self.graph:
+            if node not in self.deps_array:
+                continue
+
+            depends_on = self.deps_array[node][0]
+
+            if depends_on == 0:
+                group.append(node)
+
+        if len(group) == 0 and len(self.deps_array) != 0:
+            bb.fatal("possible fstype circular dependency...")
+
+        sorted_list.append(group)
+
+        # remove added nodes from deps_array
+        for item in group:
+            for node in self.graph:
+                if item in self.graph[node].split():
+                    self.deps_array[node][0] -= 1
+
+            self.deps_array.pop(item, None)
+
+        if len(self.deps_array):
+            # recursive call, to find the next group
+            sorted_list += self._sort_graph()
+
+        return sorted_list
+
+    def group_fstypes(self, image_fstypes):
+        self.graph = self._construct_dep_graph(image_fstypes)
+
+        self._clean_graph()
+
+        self.deps_array = self._compute_dependencies()
+
+        alltypes = [node for node in self.graph]
+
+        return (alltypes, self._sort_graph())
+
+
+class Image(ImageDepGraph):
+    def __init__(self, d):
+        self.d = d
+
+        super(Image, self).__init__(d)
+
+    def _get_rootfs_size(self):
+        """compute the rootfs size"""
+        rootfs_alignment = int(self.d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
+        overhead_factor = float(self.d.getVar('IMAGE_OVERHEAD_FACTOR', True))
+        rootfs_req_size = int(self.d.getVar('IMAGE_ROOTFS_SIZE', True))
+        rootfs_extra_space = eval(self.d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
+        rootfs_maxsize = self.d.getVar('IMAGE_ROOTFS_MAXSIZE', True)
+
+        output = subprocess.check_output(['du', '-ks',
+                                          self.d.getVar('IMAGE_ROOTFS', True)])
+        size_kb = int(output.split()[0])
+        base_size = size_kb * overhead_factor
+        base_size = max(base_size, rootfs_req_size) + rootfs_extra_space
+
+        if base_size != int(base_size):
+            base_size = int(base_size + 1)
+
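+        # Round the size up to the next multiple of IMAGE_ROOTFS_ALIGNMENT.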
+        base_size += rootfs_alignment - 1
+        base_size -= base_size % rootfs_alignment
+
+        # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
+        if rootfs_maxsize:
+            rootfs_maxsize_int = int(rootfs_maxsize)
+            if base_size > rootfs_maxsize_int:
+                bb.fatal("The rootfs size %d(K) overrides the max size %d(K)" % \
+                    (base_size, rootfs_maxsize_int))
+
+        return base_size
+
+    def _create_symlinks(self, subimages):
+        """create symlinks to the newly created image"""
+        deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+        img_name = self.d.getVar('IMAGE_NAME', True)
+        link_name = self.d.getVar('IMAGE_LINK_NAME', True)
+        manifest_name = self.d.getVar('IMAGE_MANIFEST', True)
+
+        os.chdir(deploy_dir)
+
+        if link_name:
+            for type in subimages:
+                if os.path.exists(img_name + ".rootfs." + type):
+                    dst = link_name + "." + type
+                    src = img_name + ".rootfs." + type
+                    bb.note("Creating symlink: %s -> %s" % (dst, src))
+                    os.symlink(src, dst)
+
+            if manifest_name is not None and \
+                    os.path.exists(manifest_name) and \
+                    not os.path.exists(link_name + ".manifest"):
+                os.symlink(os.path.basename(manifest_name),
+                           link_name + ".manifest")
+
+    def _remove_old_symlinks(self):
+        """remove the symlinks to old binaries"""
+
+        if self.d.getVar('IMAGE_LINK_NAME', True):
+            deploy_dir = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+            for img in os.listdir(deploy_dir):
+                if img.find(self.d.getVar('IMAGE_LINK_NAME', True)) == 0:
+                    img = os.path.join(deploy_dir, img)
+                    if os.path.islink(img):
+                        if self.d.getVar('RM_OLD_IMAGE', True) == "1" and \
+                                os.path.exists(os.path.realpath(img)):
+                            os.remove(os.path.realpath(img))
+
+                        os.remove(img)
+
+    """
+    This function will just filter out the compressed image types from the
+    fstype groups returning a (filtered_fstype_groups, cimages) tuple.
+    """
+    def _filter_out_commpressed(self, fstype_groups):
+        ctypes = self.d.getVar('COMPRESSIONTYPES', True).split()
+        cimages = {}
+
+        filtered_groups = []
+        for group in fstype_groups:
+            filtered_group = []
+            for type in group:
+                basetype = None
+                for ctype in ctypes:
+                    if type.endswith("." + ctype):
+                        basetype = type[:-len("." + ctype)]
+                        if basetype not in filtered_group:
+                            filtered_group.append(basetype)
+                        if basetype not in cimages:
+                            cimages[basetype] = []
+                        if ctype not in cimages[basetype]:
+                            cimages[basetype].append(ctype)
+                        break
+                if not basetype and type not in filtered_group:
+                    filtered_group.append(type)
+
+            filtered_groups.append(filtered_group)
+
+        return (filtered_groups, cimages)
+
+    def _get_image_types(self):
+        """returns a (types, cimages) tuple"""
+
+        alltypes, fstype_groups = self.group_fstypes(self.d.getVar('IMAGE_FSTYPES', True).split())
+
+        filtered_groups, cimages = self._filter_out_compressed(fstype_groups)
+
+        return (alltypes, filtered_groups, cimages)
+
+    def _write_script(self, type, cmds):
+        tempdir = self.d.getVar('T', True)
+        script_name = os.path.join(tempdir, "create_image." + type)
+        rootfs_size = self._get_rootfs_size()
+
+        self.d.setVar('img_creation_func', '\n'.join(cmds))
+        self.d.setVarFlag('img_creation_func', 'func', 1)
+        self.d.setVarFlag('img_creation_func', 'fakeroot', 1)
+        self.d.setVar('ROOTFS_SIZE', str(rootfs_size))
+
+        with open(script_name, "w+") as script:
+            script.write("%s" % bb.build.shell_trap_code())
+            script.write("export ROOTFS_SIZE=%d\n" % rootfs_size)
+            bb.data.emit_func('img_creation_func', script, self.d)
+            script.write("img_creation_func\n")
+
+        os.chmod(script_name, 0775)
+
+        return script_name
+
+    def _get_imagecmds(self):
+        old_overrides = self.d.getVar('OVERRIDES', 0)
+
+        alltypes, fstype_groups, cimages = self._get_image_types()
+
+        image_cmd_groups = []
+
+        bb.note("The image creation groups are: %s" % str(fstype_groups))
+        for fstype_group in fstype_groups:
+            image_cmds = []
+            for type in fstype_group:
+                cmds = []
+                subimages = []
+
+                localdata = bb.data.createCopy(self.d)
+                localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
+                bb.data.update_data(localdata)
+                localdata.setVar('type', type)
+
+                image_cmd = localdata.getVar("IMAGE_CMD", True)
+                if image_cmd:
+                    cmds.append("\t" + image_cmd)
+                else:
+                    bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % type)
+                cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
+
+                if type in cimages:
+                    for ctype in cimages[type]:
+                        cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
+                        subimages.append(type + "." + ctype)
+
+                if type not in alltypes:
+                    cmds.append(localdata.expand("\trm ${IMAGE_NAME}.rootfs.${type}"))
+                else:
+                    subimages.append(type)
+
+                script_name = self._write_script(type, cmds)
+
+                image_cmds.append((type, subimages, script_name))
+
+            image_cmd_groups.append(image_cmds)
+
+        return image_cmd_groups
+
+    def _write_wic_env(self):
+        """
+        Write environment variables used by wic
+        to tmp/sysroots/<machine>/imgdata/<image>.env
+        """
+        stdir = self.d.getVar('STAGING_DIR_TARGET', True)
+        outdir = os.path.join(stdir, 'imgdata')
+        if not os.path.exists(outdir):
+            os.makedirs(outdir)
+        basename = self.d.getVar('IMAGE_BASENAME', True)
+        with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
+            for var in self.d.getVar('WICVARS', True).split():
+                value = self.d.getVar(var, True)
+                if value:
+                    envf.write('%s="%s"\n' % (var, value.strip()))
+
+    def create(self):
+        bb.note("###### Generate images #######")
+        pre_process_cmds = self.d.getVar("IMAGE_PREPROCESS_COMMAND", True)
+        post_process_cmds = self.d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
+
+        execute_pre_post_process(self.d, pre_process_cmds)
+
+        self._remove_old_symlinks()
+
+        image_cmd_groups = self._get_imagecmds()
+
+        self._write_wic_env()
+
+        for image_cmds in image_cmd_groups:
+            # create the images in parallel
+            nproc = multiprocessing.cpu_count()
+            pool = bb.utils.multiprocessingpool(nproc)
+            results = list(pool.imap(generate_image, image_cmds))
+            pool.close()
+            pool.join()
+
+            for result in results:
+                if result is not None:
+                    bb.fatal(result)
+
+            for image_type, subimages, script in image_cmds:
+                bb.note("Creating symlinks for %s image ..." % image_type)
+                self._create_symlinks(subimages)
+
+        execute_pre_post_process(self.d, post_process_cmds)
+
+
+def create_image(d):
+    Image(d).create()
+
+if __name__ == "__main__":
+    """
+    Image creation can be called independent from bitbake environment.
+    """
+    """
+    TBD
+    """
diff --git a/meta/lib/oe/license.py b/meta/lib/oe/license.py
new file mode 100644
index 0000000..f0f661c
--- /dev/null
+++ b/meta/lib/oe/license.py
@@ -0,0 +1,217 @@
+# vi:sts=4:sw=4:et
+"""Code for parsing OpenEmbedded license strings"""
+
+import ast
+import re
+from fnmatch import fnmatchcase as fnmatch
+
+def license_ok(license, dont_want_licenses):
+    """ Return False if License exist in dont_want_licenses else True """
+    for dwl in dont_want_licenses:
+        # If you want to exclude license named generically 'X', we
+        # surely want to exclude 'X+' as well.  In consequence, we
+        # will exclude a trailing '+' character from LICENSE in
+        # case INCOMPATIBLE_LICENSE is not a 'X+' license.
+        lic = license
+        if not re.search('\+$', dwl):
+            lic = re.sub('\+', '', license)
+        if fnmatch(lic, dwl):
+            return False
+    return True
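+
+# Illustrative behaviour (hedged examples):
+#   license_ok('GPLv3',  ['GPLv3'])  -> False  (direct match)
+#   license_ok('GPLv3+', ['GPLv3'])  -> False  (the '+' variant is stripped)
+#   license_ok('GPLv3',  ['GPLv3+']) -> True   (only the '+' form is excluded)
+#   license_ok('MIT',    ['GPLv3'])  -> True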
+
+class LicenseError(Exception):
+    pass
+
+class LicenseSyntaxError(LicenseError):
+    def __init__(self, licensestr, exc):
+        self.licensestr = licensestr
+        self.exc = exc
+        LicenseError.__init__(self)
+
+    def __str__(self):
+        return "error in '%s': %s" % (self.licensestr, self.exc)
+
+class InvalidLicense(LicenseError):
+    def __init__(self, license):
+        self.license = license
+        LicenseError.__init__(self)
+
+    def __str__(self):
+        return "invalid characters in license '%s'" % self.license
+
+license_operator_chars = '&|() '
+license_operator = re.compile('([' + license_operator_chars + '])')
+license_pattern = re.compile('[a-zA-Z0-9.+_\-]+$')
+
+class LicenseVisitor(ast.NodeVisitor):
+    """Get elements based on OpenEmbedded license strings"""
+    def get_elements(self, licensestr):
+        new_elements = []
+        elements = filter(lambda x: x.strip(), license_operator.split(licensestr))
+        for pos, element in enumerate(elements):
+            if license_pattern.match(element):
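+                # Two adjacent license names imply an AND ("a b" == "a & b"),
+                # so insert the implicit '&' operator between them.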
+                if pos > 0 and license_pattern.match(elements[pos-1]):
+                    new_elements.append('&')
+                element = '"' + element + '"'
+            elif not license_operator.match(element):
+                raise InvalidLicense(element)
+            new_elements.append(element)
+
+        return new_elements
+
+    """Syntax tree visitor which can accept elements previously generated with
+    OpenEmbedded license string"""
+    def visit_elements(self, elements):
+        self.visit(ast.parse(' '.join(elements)))
+
+    """Syntax tree visitor which can accept OpenEmbedded license strings"""
+    def visit_string(self, licensestr):
+        self.visit_elements(self.get_elements(licensestr))
+
+class FlattenVisitor(LicenseVisitor):
+    """Flatten a license tree (parsed from a string) by selecting one of each
+    set of OR options, in the way the user specifies"""
+    def __init__(self, choose_licenses):
+        self.choose_licenses = choose_licenses
+        self.licenses = []
+        LicenseVisitor.__init__(self)
+
+    def visit_Str(self, node):
+        self.licenses.append(node.s)
+
+    def visit_BinOp(self, node):
+        if isinstance(node.op, ast.BitOr):
+            left = FlattenVisitor(self.choose_licenses)
+            left.visit(node.left)
+
+            right = FlattenVisitor(self.choose_licenses)
+            right.visit(node.right)
+
+            selected = self.choose_licenses(left.licenses, right.licenses)
+            self.licenses.extend(selected)
+        else:
+            self.generic_visit(node)
+
+def flattened_licenses(licensestr, choose_licenses):
+    """Given a license string and choose_licenses function, return a flat list of licenses"""
+    flatten = FlattenVisitor(choose_licenses)
+    try:
+        flatten.visit_string(licensestr)
+    except SyntaxError as exc:
+        raise LicenseSyntaxError(licensestr, exc)
+    return flatten.licenses
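+
+# Illustrative usage (hedged example; 'prefer_first' is a hypothetical
+# chooser that always keeps the left-hand OR branch):
+#   def prefer_first(left, right):
+#       return left
+#   flattened_licenses("GPLv2 | (MIT & BSD)", prefer_first)  ->  ['GPLv2']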
+
+def is_included(licensestr, whitelist=None, blacklist=None):
+    """Given a license string and whitelist and blacklist, determine if the
+    license string matches the whitelist and does not match the blacklist.
+
+    Returns a tuple holding the boolean state and a list of the applicable
+    licenses which were excluded (or None, if the state is True)
+    """
+
+    def include_license(license):
+        return any(fnmatch(license, pattern) for pattern in whitelist)
+
+    def exclude_license(license):
+        return any(fnmatch(license, pattern) for pattern in blacklist)
+
+    def choose_licenses(alpha, beta):
+        """Select the option in an OR which is the 'best' (has the most
+        included licenses)."""
+        alpha_weight = len(filter(include_license, alpha))
+        beta_weight = len(filter(include_license, beta))
+        if alpha_weight > beta_weight:
+            return alpha
+        else:
+            return beta
+
+    if not whitelist:
+        whitelist = ['*']
+
+    if not blacklist:
+        blacklist = []
+
+    licenses = flattened_licenses(licensestr, choose_licenses)
+    excluded = filter(lambda lic: exclude_license(lic), licenses)
+    included = filter(lambda lic: include_license(lic), licenses)
+    if excluded:
+        return False, excluded
+    else:
+        return True, included
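+
+# Illustrative usage (hedged example):
+#   is_included('GPLv2 | Proprietary', whitelist=['GPL*'])
+#       ->  (True, ['GPLv2'])   # the OR branch with more whitelisted
+#                               # licenses wins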
+
+class ManifestVisitor(LicenseVisitor):
+    """Walk license tree (parsed from a string) removing the incompatible
+    licenses specified"""
+    def __init__(self, dont_want_licenses, canonical_license, d):
+        self._dont_want_licenses = dont_want_licenses
+        self._canonical_license = canonical_license
+        self._d = d
+        self._operators = []
+
+        self.licenses = []
+        self.licensestr = ''
+
+        LicenseVisitor.__init__(self)
+
+    def visit(self, node):
+        if isinstance(node, ast.Str):
+            lic = node.s
+
+            if license_ok(self._canonical_license(self._d, lic),
+                    self._dont_want_licenses) == True:
+                if self._operators:
+                    ops = []
+                    for op in self._operators:
+                        if op == '[':
+                            ops.append(op)
+                        elif op == ']':
+                            ops.append(op)
+                        else:
+                            if not ops:
+                                ops.append(op)
+                            elif ops[-1] in ['[', ']']:
+                                ops.append(op)
+                            else:
+                                ops[-1] = op
+
+                    for op in ops:
+                        if op == '[' or op == ']':
+                            self.licensestr += op
+                        elif self.licenses:
+                            self.licensestr += ' ' + op + ' '
+
+                    self._operators = []
+
+                self.licensestr += lic
+                self.licenses.append(lic)
+        elif isinstance(node, ast.BitAnd):
+            self._operators.append("&")
+        elif isinstance(node, ast.BitOr):
+            self._operators.append("|")
+        elif isinstance(node, ast.List):
+            self._operators.append("[")
+        elif isinstance(node, ast.Load):
+            self.licensestr += "]"
+
+        self.generic_visit(node)
+
+def manifest_licenses(licensestr, dont_want_licenses, canonical_license, d):
+    """Given a license string and dont_want_licenses list,
+       return license string filtered and a list of licenses"""
+    manifest = ManifestVisitor(dont_want_licenses, canonical_license, d)
+
+    try:
+        elements = manifest.get_elements(licensestr)
+
+        # Replace '(' and ')' with '[' and ']' so that ast parses the
+        # groupings as List and Load nodes.
+        elements = ['[' if e == '(' else e for e in elements]
+        elements = [']' if e == ')' else e for e in elements]
+
+        manifest.visit_elements(elements)
+    except SyntaxError as exc:
+        raise LicenseSyntaxError(licensestr, exc)
+
+    # Replace '[' and ']' back with '(' and ')' to output the correct
+    # license string.
+    manifest.licensestr = manifest.licensestr.replace('[', '(').replace(']', ')')
+
+    return (manifest.licensestr, manifest.licenses)
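+
+# Illustrative usage (hedged example; 'canonical' is a hypothetical
+# canonicalisation hook that returns the license name unchanged):
+#   def canonical(d, lic):
+#       return lic
+#   manifest_licenses("GPLv3 | MIT", ["GPLv3"], canonical, None)
+#       ->  ('MIT', ['MIT'])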
diff --git a/meta/lib/oe/lsb.py b/meta/lib/oe/lsb.py
new file mode 100644
index 0000000..ddfe71b
--- /dev/null
+++ b/meta/lib/oe/lsb.py
@@ -0,0 +1,83 @@
+import os
+
+import bb
+
+def release_dict():
+    """Return the output of lsb_release -ir as a dictionary"""
+    from subprocess import PIPE
+
+    try:
+        output, err = bb.process.run(['lsb_release', '-ir'], stderr=PIPE)
+    except bb.process.CmdError as exc:
+        return None
+
+    data = {}
+    for line in output.splitlines():
+        if line.startswith("-e"): line = line[3:]
+        try:
+            key, value = line.split(":\t", 1)
+        except ValueError:
+            continue
+        else:
+            data[key] = value
+    return data
+
+def release_dict_file():
+    """ Try to gather LSB release information manually when lsb_release tool is unavailable """
+    data = None
+    try:
+        if os.path.exists('/etc/lsb-release'):
+            data = {}
+            with open('/etc/lsb-release') as f:
+                for line in f:
+                    key, value = line.split("=", 1)
+                    data[key] = value.strip()
+        elif os.path.exists('/etc/redhat-release'):
+            data = {}
+            with open('/etc/redhat-release') as f:
+                distro = f.readline().strip()
+            import re
+            match = re.match(r'(.*) release (.*) \((.*)\)', distro)
+            if match:
+                data['DISTRIB_ID'] = match.group(1)
+                data['DISTRIB_RELEASE'] = match.group(2)
+        elif os.path.exists('/etc/os-release'):
+            data = {}
+            with open('/etc/os-release') as f:
+                for line in f:
+                    if line.startswith('NAME='):
+                        data['DISTRIB_ID'] = line[5:].rstrip().strip('"')
+                    if line.startswith('VERSION_ID='):
+                        data['DISTRIB_RELEASE'] = line[11:].rstrip().strip('"')
+        elif os.path.exists('/etc/SuSE-release'):
+            data = {}
+            data['DISTRIB_ID'] = 'SUSE LINUX'
+            with open('/etc/SuSE-release') as f:
+                for line in f:
+                    if line.startswith('VERSION = '):
+                        data['DISTRIB_RELEASE'] = line[10:].rstrip()
+                        break
+
+    except IOError:
+        return None
+    return data
+
+def distro_identifier(adjust_hook=None):
+    """Return a distro identifier string based upon lsb_release -ri,
+       with optional adjustment via a hook"""
+
+    lsb_data = release_dict()
+    if lsb_data:
+        distro_id, release = lsb_data['Distributor ID'], lsb_data['Release']
+    else:
+        lsb_data_file = release_dict_file()
+        if lsb_data_file:
+            distro_id, release = lsb_data_file['DISTRIB_ID'], lsb_data_file.get('DISTRIB_RELEASE', None)
+        else:
+            distro_id, release = None, None
+
+    if adjust_hook:
+        distro_id, release = adjust_hook(distro_id, release)
+    if not distro_id:
+        return "Unknown"
+    if release:
+        id_str = '{0}-{1}'.format(distro_id, release)
+    else:
+        id_str = distro_id
+    return id_str.replace(' ','-').replace('/','-')
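+
+# Illustrative output (hedged examples):
+#   distro_identifier()  ->  e.g. 'Ubuntu-14.04', 'Fedora-21' or 'Unknown'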
diff --git a/meta/lib/oe/maketype.py b/meta/lib/oe/maketype.py
new file mode 100644
index 0000000..139f333
--- /dev/null
+++ b/meta/lib/oe/maketype.py
@@ -0,0 +1,99 @@
+"""OpenEmbedded variable typing support
+
+Types are defined in the metadata by name, using the 'type' flag on a
+variable.  Other flags may be utilized in the construction of the types.  See
+the arguments of the type's factory for details.
+"""
+
+import inspect
+import types
+
+available_types = {}
+
+class MissingFlag(TypeError):
+    """A particular flag is required to construct the type, but has not been
+    provided."""
+    def __init__(self, flag, type):
+        self.flag = flag
+        self.type = type
+        TypeError.__init__(self)
+
+    def __str__(self):
+        return "Type '%s' requires flag '%s'" % (self.type, self.flag)
+
+def factory(var_type):
+    """Return the factory for a specified type."""
+    if var_type is None:
+        raise TypeError("No type specified. Valid types: %s" %
+                        ', '.join(available_types))
+    try:
+        return available_types[var_type]
+    except KeyError:
+        raise TypeError("Invalid type '%s':\n  Valid types: %s" %
+                        (var_type, ', '.join(available_types)))
+
+def create(value, var_type, **flags):
+    """Create an object of the specified type, given the specified flags and
+    string value."""
+    obj = factory(var_type)
+    objflags = {}
+    for flag in obj.flags:
+        if flag not in flags:
+            if flag not in obj.optflags:
+                raise MissingFlag(flag, var_type)
+        else:
+            objflags[flag] = flags[flag]
+
+    return obj(value, **objflags)
+
+def get_callable_args(obj):
+    """Grab all but the first argument of the specified callable, returning
+    the list, as well as a list of which of the arguments have default
+    values."""
+    if type(obj) is type:
+        obj = obj.__init__
+
+    args, varargs, keywords, defaults = inspect.getargspec(obj)
+    flaglist = []
+    if args:
+        if len(args) > 1 and args[0] == 'self':
+            args = args[1:]
+        flaglist.extend(args)
+
+    optional = set()
+    if defaults:
+        optional |= set(flaglist[-len(defaults):])
+    return flaglist, optional
+
+def factory_setup(name, obj):
+    """Prepare a factory for use."""
+    args, optional = get_callable_args(obj)
+    extra_args = args[1:]
+    if extra_args:
+        obj.flags = extra_args
+        obj.optflags = set(optional)
+    else:
+        obj.flags = obj.optflags = ()
+
+    if not hasattr(obj, 'name'):
+        obj.name = name
+
+def register(name, factory):
+    """Register a type, given its name and a factory callable.
+
+    Determines the required and optional flags from the factory's
+    arguments."""
+    factory_setup(name, factory)
+    available_types[factory.name] = factory
+
+
+# Register all our included types. Note that 'import types' above resolves
+# to the sibling oe/types.py module via Python 2 implicit relative imports,
+# not to the standard library 'types' module.
+for name in dir(types):
+    if name.startswith('_'):
+        continue
+
+    obj = getattr(types, name)
+    if not callable(obj):
+        continue
+
+    register(name, obj)
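+
+# Illustrative usage (hedged example; assumes the sibling oe/types.py
+# provides 'integer' and 'boolean' factories):
+#   create('10', 'integer')    ->  10
+#   create('true', 'boolean')  ->  True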
diff --git a/meta/lib/oe/manifest.py b/meta/lib/oe/manifest.py
new file mode 100644
index 0000000..42832f1
--- /dev/null
+++ b/meta/lib/oe/manifest.py
@@ -0,0 +1,345 @@
+from abc import ABCMeta, abstractmethod
+import os
+import re
+import bb
+
+
+class Manifest(object):
+    """
+    This is an abstract class. Do not instantiate this directly.
+    """
+    __metaclass__ = ABCMeta
+
+    PKG_TYPE_MUST_INSTALL = "mip"
+    PKG_TYPE_MULTILIB = "mlp"
+    PKG_TYPE_LANGUAGE = "lgp"
+    PKG_TYPE_ATTEMPT_ONLY = "aop"
+
+    MANIFEST_TYPE_IMAGE = "image"
+    MANIFEST_TYPE_SDK_HOST = "sdk_host"
+    MANIFEST_TYPE_SDK_TARGET = "sdk_target"
+
+    var_maps = {
+        MANIFEST_TYPE_IMAGE: {
+            "PACKAGE_INSTALL": PKG_TYPE_MUST_INSTALL,
+            "PACKAGE_INSTALL_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY,
+            "LINGUAS_INSTALL": PKG_TYPE_LANGUAGE
+        },
+        MANIFEST_TYPE_SDK_HOST: {
+            "TOOLCHAIN_HOST_TASK": PKG_TYPE_MUST_INSTALL,
+            "TOOLCHAIN_HOST_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
+        },
+        MANIFEST_TYPE_SDK_TARGET: {
+            "TOOLCHAIN_TARGET_TASK": PKG_TYPE_MUST_INSTALL,
+            "TOOLCHAIN_TARGET_TASK_ATTEMPTONLY": PKG_TYPE_ATTEMPT_ONLY
+        }
+    }
+
+    INSTALL_ORDER = [
+        PKG_TYPE_LANGUAGE,
+        PKG_TYPE_MUST_INSTALL,
+        PKG_TYPE_ATTEMPT_ONLY,
+        PKG_TYPE_MULTILIB
+    ]
+
+    initial_manifest_file_header = \
+        "# This file was generated automatically and contains the packages\n" \
+        "# passed on to the package manager in order to create the rootfs.\n\n" \
+        "# Format:\n" \
+        "#  <package_type>,<package_name>\n" \
+        "# where:\n" \
+        "#   <package_type> can be:\n" \
+        "#      'mip' = must install package\n" \
+        "#      'aop' = attempt only package\n" \
+        "#      'mlp' = multilib package\n" \
+        "#      'lgp' = language package\n\n"
+
+    def __init__(self, d, manifest_dir=None, manifest_type=MANIFEST_TYPE_IMAGE):
+        self.d = d
+        self.manifest_type = manifest_type
+
+        if manifest_dir is None:
+            if manifest_type != self.MANIFEST_TYPE_IMAGE:
+                self.manifest_dir = self.d.getVar('SDK_DIR', True)
+            else:
+                self.manifest_dir = self.d.getVar('WORKDIR', True)
+        else:
+            self.manifest_dir = manifest_dir
+
+        bb.utils.mkdirhier(self.manifest_dir)
+
+        self.initial_manifest = os.path.join(self.manifest_dir, "%s_initial_manifest" % manifest_type)
+        self.final_manifest = os.path.join(self.manifest_dir, "%s_final_manifest" % manifest_type)
+        self.full_manifest = os.path.join(self.manifest_dir, "%s_full_manifest" % manifest_type)
+
+        # packages in the following vars will be split in 'must install' and
+        # 'multilib'
+        self.vars_to_split = ["PACKAGE_INSTALL",
+                              "TOOLCHAIN_HOST_TASK",
+                              "TOOLCHAIN_TARGET_TASK"]
+
+    """
+    This creates a standard initial manifest for core-image-(minimal|sato|sato-sdk).
+    This will be used for testing until the class is implemented properly!
+    """
+    def _create_dummy_initial(self):
+        image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+        pkg_list = dict()
+        if image_rootfs.find("core-image-sato-sdk") > 0:
+            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
+                "packagegroup-core-x11-sato-games packagegroup-base-extended " \
+                "packagegroup-core-x11-sato packagegroup-core-x11-base " \
+                "packagegroup-core-sdk packagegroup-core-tools-debug " \
+                "packagegroup-core-boot packagegroup-core-tools-testapps " \
+                "packagegroup-core-eclipse-debug packagegroup-core-qt-demoapps " \
+                "apt packagegroup-core-tools-profile psplash " \
+                "packagegroup-core-standalone-sdk-target " \
+                "packagegroup-core-ssh-openssh dpkg kernel-dev"
+            pkg_list[self.PKG_TYPE_LANGUAGE] = \
+                "locale-base-en-us locale-base-en-gb"
+        elif image_rootfs.find("core-image-sato") > 0:
+            pkg_list[self.PKG_TYPE_MUST_INSTALL] = \
+                "packagegroup-core-ssh-dropbear packagegroup-core-x11-sato-games " \
+                "packagegroup-core-x11-base psplash apt dpkg packagegroup-base-extended " \
+                "packagegroup-core-x11-sato packagegroup-core-boot"
+            pkg_list['lgp'] = \
+                "locale-base-en-us locale-base-en-gb"
+        elif image_rootfs.find("core-image-minimal") > 0:
+            pkg_list[self.PKG_TYPE_MUST_INSTALL] = "run-postinsts packagegroup-core-boot"
+
+        with open(self.initial_manifest, "w+") as manifest:
+            manifest.write(self.initial_manifest_file_header)
+
+            for pkg_type in pkg_list:
+                for pkg in pkg_list[pkg_type].split():
+                    manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+    """
+    This will create the initial manifest which will be used by Rootfs class to
+    generate the rootfs
+    """
+    @abstractmethod
+    def create_initial(self):
+        pass
+
+    """
+    This creates the manifest after everything has been installed.
+    """
+    @abstractmethod
+    def create_final(self):
+        pass
+
+    """
+    This creates the manifest after the package in initial manifest has been
+    dummy installed. It lists all *to be installed* packages. There is no real
+    installation, just a test.
+    """
+    @abstractmethod
+    def create_full(self, pm):
+        pass
+
+    """
+    The following function parses an initial manifest and returns a dictionary
+    object with the must install, attempt only, multilib and language packages.
+    """
+    def parse_initial_manifest(self):
+        pkgs = dict()
+
+        with open(self.initial_manifest) as manifest:
+            for line in manifest.read().split('\n'):
+                comment = re.match("^#.*", line)
+                pattern = "^(%s|%s|%s|%s),(.*)$" % \
+                          (self.PKG_TYPE_MUST_INSTALL,
+                           self.PKG_TYPE_ATTEMPT_ONLY,
+                           self.PKG_TYPE_MULTILIB,
+                           self.PKG_TYPE_LANGUAGE)
+                pkg = re.match(pattern, line)
+
+                if comment is not None:
+                    continue
+
+                if pkg is not None:
+                    pkg_type = pkg.group(1)
+                    pkg_name = pkg.group(2)
+
+                    if not pkg_type in pkgs:
+                        pkgs[pkg_type] = [pkg_name]
+                    else:
+                        pkgs[pkg_type].append(pkg_name)
+
+        return pkgs
+
+    '''
+    The following function parses a full manifest and returns a list
+    of packages.
+    '''
+    def parse_full_manifest(self):
+        installed_pkgs = list()
+        if not os.path.exists(self.full_manifest):
+            bb.note('full manifest does not exist')
+            return installed_pkgs
+
+        with open(self.full_manifest, 'r') as manifest:
+            for pkg in manifest.read().split('\n'):
+                installed_pkgs.append(pkg.strip())
+
+        return installed_pkgs
+
+
+class RpmManifest(Manifest):
+    """
+    Returns a dictionary object with mip and mlp packages.
+    """
+    def _split_multilib(self, pkg_list):
+        pkgs = dict()
+
+        for pkg in pkg_list.split():
+            pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+            ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+
+            for ml_variant in ml_variants:
+                if pkg.startswith(ml_variant + '-'):
+                    pkg_type = self.PKG_TYPE_MULTILIB
+
+            if not pkg_type in pkgs:
+                pkgs[pkg_type] = pkg
+            else:
+                pkgs[pkg_type] += " " + pkg
+
+        return pkgs
+
+    def create_initial(self):
+        pkgs = dict()
+
+        with open(self.initial_manifest, "w+") as manifest:
+            manifest.write(self.initial_manifest_file_header)
+
+            for var in self.var_maps[self.manifest_type]:
+                if var in self.vars_to_split:
+                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
+                    if split_pkgs is not None:
+                        pkgs = dict(pkgs.items() + split_pkgs.items())
+                else:
+                    pkg_list = self.d.getVar(var, True)
+                    if pkg_list is not None:
+                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list
+
+            for pkg_type in pkgs:
+                for pkg in pkgs[pkg_type].split():
+                    manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+    def create_final(self):
+        pass
+
+    def create_full(self, pm):
+        pass
+
+
+class OpkgManifest(Manifest):
+    """
+    Returns a dictionary object with mip and mlp packages.
+    """
+    def _split_multilib(self, pkg_list):
+        pkgs = dict()
+
+        for pkg in pkg_list.split():
+            pkg_type = self.PKG_TYPE_MUST_INSTALL
+
+            ml_variants = self.d.getVar('MULTILIB_VARIANTS', True).split()
+
+            for ml_variant in ml_variants:
+                if pkg.startswith(ml_variant + '-'):
+                    pkg_type = self.PKG_TYPE_MULTILIB
+
+            if not pkg_type in pkgs:
+                pkgs[pkg_type] = pkg
+            else:
+                pkgs[pkg_type] += " " + pkg
+
+        return pkgs
+
+    def create_initial(self):
+        pkgs = dict()
+
+        with open(self.initial_manifest, "w+") as manifest:
+            manifest.write(self.initial_manifest_file_header)
+
+            for var in self.var_maps[self.manifest_type]:
+                if var in self.vars_to_split:
+                    split_pkgs = self._split_multilib(self.d.getVar(var, True))
+                    if split_pkgs is not None:
+                        pkgs = dict(pkgs.items() + split_pkgs.items())
+                else:
+                    pkg_list = self.d.getVar(var, True)
+                    if pkg_list is not None:
+                        pkgs[self.var_maps[self.manifest_type][var]] = pkg_list
+
+            for pkg_type in pkgs:
+                for pkg in pkgs[pkg_type].split():
+                    manifest.write("%s,%s\n" % (pkg_type, pkg))
+
+    def create_final(self):
+        pass
+
+    def create_full(self, pm):
+        if not os.path.exists(self.initial_manifest):
+            self.create_initial()
+
+        initial_manifest = self.parse_initial_manifest()
+        pkgs_to_install = list()
+        for pkg_type in initial_manifest:
+            pkgs_to_install += initial_manifest[pkg_type]
+        if len(pkgs_to_install) == 0:
+            return
+
+        output = pm.dummy_install(pkgs_to_install)
+
+        with open(self.full_manifest, 'w+') as manifest:
+            pkg_re = re.compile('^Installing ([^ ]+) [^ ].*')
+            for line in set(output.split('\n')):
+                m = pkg_re.match(line)
+                if m:
+                    manifest.write(m.group(1) + '\n')
+
+        return
+
+
+class DpkgManifest(Manifest):
+    def create_initial(self):
+        with open(self.initial_manifest, "w+") as manifest:
+            manifest.write(self.initial_manifest_file_header)
+
+            for var in self.var_maps[self.manifest_type]:
+                pkg_list = self.d.getVar(var, True)
+
+                if pkg_list is None:
+                    continue
+
+                for pkg in pkg_list.split():
+                    manifest.write("%s,%s\n" %
+                                   (self.var_maps[self.manifest_type][var], pkg))
+
+    def create_final(self):
+        pass
+
+    def create_full(self, pm):
+        pass
+
+
+def create_manifest(d, final_manifest=False, manifest_dir=None,
+                    manifest_type=Manifest.MANIFEST_TYPE_IMAGE):
+    manifest_map = {'rpm': RpmManifest,
+                    'ipk': OpkgManifest,
+                    'deb': DpkgManifest}
+
+    manifest = manifest_map[d.getVar('IMAGE_PKGTYPE', True)](d, manifest_dir, manifest_type)
+
+    if final_manifest:
+        manifest.create_final()
+    else:
+        manifest.create_initial()
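+
+# Illustrative usage (hedged example, from within an image task where 'd'
+# is the bitbake datastore):
+#   create_manifest(d)                       # writes <type>_initial_manifest
+#   create_manifest(d, final_manifest=True)  # writes <type>_final_manifest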
+
+
+if __name__ == "__main__":
+    pass
diff --git a/meta/lib/oe/package.py b/meta/lib/oe/package.py
new file mode 100644
index 0000000..f176446
--- /dev/null
+++ b/meta/lib/oe/package.py
@@ -0,0 +1,125 @@
+import os
+
+import bb
+
+def runstrip(arg):
+    # Function to strip a single file, called from split_and_strip_files below
+    # A working 'file' (one which works on the target architecture)
+    #
+    # The elftype is a bit pattern (explained in split_and_strip_files) to tell
+    # us what type of file we're processing...
+    # 4 - executable
+    # 8 - shared library
+    # 16 - kernel module
+
+    import stat, subprocess
+
+    (file, elftype, strip) = arg
+
+    newmode = None
+    if not os.access(file, os.W_OK) or not os.access(file, os.R_OK):
+        origmode = os.stat(file)[stat.ST_MODE]
+        newmode = origmode | stat.S_IWRITE | stat.S_IREAD
+        os.chmod(file, newmode)
+
+    extraflags = ""
+
+    # kernel module
+    if elftype & 16:
+        extraflags = "--strip-debug --remove-section=.comment --remove-section=.note --preserve-dates"
+    # .so and shared library
+    elif ".so" in file and elftype & 8:
+        extraflags = "--remove-section=.comment --remove-section=.note --strip-unneeded"
+    # shared or executable:
+    elif elftype & 8 or elftype & 4:
+        extraflags = "--remove-section=.comment --remove-section=.note"
+
+    stripcmd = "'%s' %s '%s'" % (strip, extraflags, file)
+    bb.debug(1, "runstrip: %s" % stripcmd)
+
+    try:
+        output = subprocess.check_output(stripcmd, stderr=subprocess.STDOUT, shell=True)
+    except subprocess.CalledProcessError as e:
+        bb.error("runstrip: '%s' strip command failed with %s (%s)" % (stripcmd, e.returncode, e.output))
+
+    if newmode:
+        os.chmod(file, origmode)
+
+    return
+
+
+def file_translate(file):
+    ft = file.replace("@", "@at@")
+    ft = ft.replace(" ", "@space@")
+    ft = ft.replace("\t", "@tab@")
+    ft = ft.replace("[", "@openbrace@")
+    ft = ft.replace("]", "@closebrace@")
+    ft = ft.replace("_", "@underscore@")
+    return ft
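+
+# Illustrative behaviour (hedged example):
+#   file_translate("/usr/lib/my file_1.so")
+#       ->  "/usr/lib/my@space@file@underscore@1.so"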
+
+def filedeprunner(arg):
+    import re, subprocess, shlex
+
+    (pkg, pkgfiles, rpmdeps, pkgdest) = arg
+    provides = {}
+    requires = {}
+
+    r = re.compile(r'[<>=]+ +[^ ]*')
+
+    def process_deps(pipe, pkg, pkgdest, provides, requires):
+        for line in pipe:
+            f = line.split(" ", 1)[0].strip()
+            line = line.split(" ", 1)[1].strip()
+
+            if line.startswith("Requires:"):
+                i = requires
+            elif line.startswith("Provides:"):
+                i = provides
+            else:
+                continue
+
+            file = f.replace(pkgdest + "/" + pkg, "")
+            file = file_translate(file)
+            value = line.split(":", 1)[1].strip()
+            value = r.sub(r'(\g<0>)', value)
+
+            if value.startswith("rpmlib("):
+                continue
+            if value == "python":
+                continue
+            if file not in i:
+                i[file] = []
+            i[file].append(value)
+
+        return provides, requires
+
+    try:
+        dep_popen = subprocess.Popen(shlex.split(rpmdeps) + pkgfiles, stdout=subprocess.PIPE)
+        provides, requires = process_deps(dep_popen.stdout, pkg, pkgdest, provides, requires)
+    except OSError as e:
+        bb.error("rpmdeps: '%s' command failed, '%s'" % (shlex.split(rpmdeps) + pkgfiles, e))
+        raise e
+
+    return (pkg, provides, requires)
+
+
+def read_shlib_providers(d):
+    import re
+
+    shlib_provider = {}
+    shlibs_dirs = d.getVar('SHLIBSDIRS', True).split()
+    list_re = re.compile('^(.*)\.list$')
+    # Go from least to most specific since the last one found wins
+    for dir in reversed(shlibs_dirs):
+        bb.debug(2, "Reading shlib providers in %s" % (dir))
+        if not os.path.exists(dir):
+            continue
+        for file in os.listdir(dir):
+            m = list_re.match(file)
+            if m:
+                dep_pkg = m.group(1)
+                with open(os.path.join(dir, file)) as fd:
+                    lines = fd.readlines()
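+                # Each .list line is assumed to be of the form
+                # <library>:<path>:<version>, recorded as
+                # shlib_provider[<library>][<path>] = (<package>, <version>)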
+                for l in lines:
+                    s = l.strip().split(":")
+                    if s[0] not in shlib_provider:
+                        shlib_provider[s[0]] = {}
+                    shlib_provider[s[0]][s[1]] = (dep_pkg, s[2])
+    return shlib_provider
diff --git a/meta/lib/oe/package_manager.py b/meta/lib/oe/package_manager.py
new file mode 100644
index 0000000..292ed44
--- /dev/null
+++ b/meta/lib/oe/package_manager.py
@@ -0,0 +1,1900 @@
+from abc import ABCMeta, abstractmethod
+import os
+import glob
+import subprocess
+import shutil
+import multiprocessing
+import re
+import bb
+import tempfile
+import oe.utils
+
+
+# this can be used by all PM backends to create the index files in parallel
+def create_index(arg):
+    index_cmd = arg
+
+    try:
+        bb.note("Executing '%s' ..." % index_cmd)
+        result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True)
+    except subprocess.CalledProcessError as e:
+        return("Index creation command '%s' failed with return code %d:\n%s" %
+               (e.cmd, e.returncode, e.output))
+
+    if result:
+        bb.note(result)
+
+    return None
+
+
+class Indexer(object):
+    __metaclass__ = ABCMeta
+
+    def __init__(self, d, deploy_dir):
+        self.d = d
+        self.deploy_dir = deploy_dir
+
+    @abstractmethod
+    def write_index(self):
+        pass
+
+
+class RpmIndexer(Indexer):
+    def get_ml_prefix_and_os_list(self, arch_var=None, os_var=None):
+        package_archs = {
+            'default': [],
+        }
+
+        target_os = {
+            'default': "",
+        }
+
+        if arch_var is not None and os_var is not None:
+            package_archs['default'] = self.d.getVar(arch_var, True).split()
+            package_archs['default'].reverse()
+            target_os['default'] = self.d.getVar(os_var, True).strip()
+        else:
+            package_archs['default'] = self.d.getVar("PACKAGE_ARCHS", True).split()
+            # arch order is reversed.  This ensures the -best- match is
+            # listed first!
+            package_archs['default'].reverse()
+            target_os['default'] = self.d.getVar("TARGET_OS", True).strip()
+            multilibs = self.d.getVar('MULTILIBS', True) or ""
+            for ext in multilibs.split():
+                eext = ext.split(':')
+                if len(eext) > 1 and eext[0] == 'multilib':
+                    localdata = bb.data.createCopy(self.d)
+                    default_tune_key = "DEFAULTTUNE_virtclass-multilib-" + eext[1]
+                    default_tune = localdata.getVar(default_tune_key, False)
+                    if default_tune is None:
+                        default_tune_key = "DEFAULTTUNE_ML_" + eext[1]
+                        default_tune = localdata.getVar(default_tune_key, False)
+                    if default_tune:
+                        localdata.setVar("DEFAULTTUNE", default_tune)
+                        bb.data.update_data(localdata)
+                        package_archs[eext[1]] = localdata.getVar('PACKAGE_ARCHS',
+                                                                  True).split()
+                        package_archs[eext[1]].reverse()
+                        target_os[eext[1]] = localdata.getVar("TARGET_OS",
+                                                              True).strip()
+
+        ml_prefix_list = dict()
+        for mlib in package_archs:
+            if mlib == 'default':
+                ml_prefix_list[mlib] = package_archs[mlib]
+            else:
+                ml_prefix_list[mlib] = list()
+                for arch in package_archs[mlib]:
+                    if arch in ['all', 'noarch', 'any']:
+                        ml_prefix_list[mlib].append(arch)
+                    else:
+                        ml_prefix_list[mlib].append(mlib + "_" + arch)
+
+        return (ml_prefix_list, target_os)
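+
+    # Illustrative return value (hedged example for a 'lib32' multilib build):
+    #   ml_prefix_list = {'default': ['x86_64', 'core2-64', ...],
+    #                     'lib32':   ['lib32_x86', 'lib32_i586', ...]}
+    #   target_os      = {'default': 'linux', 'lib32': 'linux'}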
+
+    def write_index(self):
+        sdk_pkg_archs = (self.d.getVar('SDK_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+        all_mlb_pkg_archs = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+
+        mlb_prefix_list = self.get_ml_prefix_and_os_list()[0]
+
+        archs = set()
+        for item in mlb_prefix_list:
+            archs = archs.union(set(i.replace('-', '_') for i in mlb_prefix_list[item]))
+
+        if len(archs) == 0:
+            archs = archs.union(set(all_mlb_pkg_archs))
+
+        archs = archs.union(set(sdk_pkg_archs))
+
+        rpm_createrepo = bb.utils.which(os.getenv('PATH'), "createrepo")
+        if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+            pkgfeed_gpg_name = self.d.getVar('PACKAGE_FEED_GPG_NAME', True)
+            pkgfeed_gpg_pass = self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE', True)
+        else:
+            pkgfeed_gpg_name = None
+            pkgfeed_gpg_pass = None
+        gpg_bin = self.d.getVar('GPG_BIN', True) or \
+                  bb.utils.which(os.getenv('PATH'), "gpg")
+
+        index_cmds = []
+        repo_sign_cmds = []
+        rpm_dirs_found = False
+        for arch in archs:
+            dbpath = os.path.join(self.d.getVar('WORKDIR', True), 'rpmdb', arch)
+            if os.path.exists(dbpath):
+                bb.utils.remove(dbpath, True)
+            arch_dir = os.path.join(self.deploy_dir, arch)
+            if not os.path.isdir(arch_dir):
+                continue
+
+            index_cmds.append("%s --dbpath %s --update -q %s" % \
+                             (rpm_createrepo, dbpath, arch_dir))
+            if pkgfeed_gpg_name:
+                repomd_file = os.path.join(arch_dir, 'repodata', 'repomd.xml')
+                gpg_cmd = "%s --detach-sign --armor --batch --no-tty --yes " \
+                          "--passphrase-file '%s' -u '%s' %s" % (gpg_bin,
+                          pkgfeed_gpg_pass, pkgfeed_gpg_name, repomd_file)
+                repo_sign_cmds.append(gpg_cmd)
+
+            rpm_dirs_found = True
+
+        if not rpm_dirs_found:
+            bb.note("There are no packages in %s" % self.deploy_dir)
+            return
+
+        # Create repodata
+        result = oe.utils.multiprocess_exec(index_cmds, create_index)
+        if result:
+            bb.fatal('%s' % ('\n'.join(result)))
+        # Sign repomd
+        result = oe.utils.multiprocess_exec(repo_sign_cmds, create_index)
+        if result:
+            bb.fatal('%s' % ('\n'.join(result)))
+        # Copy pubkey(s) to repo
+        distro_version = self.d.getVar('DISTRO_VERSION', True) or "oe.0"
+        if self.d.getVar('RPM_SIGN_PACKAGES', True) == '1':
+            shutil.copy2(self.d.getVar('RPM_GPG_PUBKEY', True),
+                         os.path.join(self.deploy_dir,
+                                      'RPM-GPG-KEY-%s' % distro_version))
+        if self.d.getVar('PACKAGE_FEED_SIGN', True) == '1':
+            shutil.copy2(self.d.getVar('PACKAGE_FEED_GPG_PUBKEY', True),
+                         os.path.join(self.deploy_dir,
+                                      'REPODATA-GPG-KEY-%s' % distro_version))
+
+
+class OpkgIndexer(Indexer):
+    def write_index(self):
+        arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS",
+                     "SDK_PACKAGE_ARCHS",
+                     "MULTILIB_ARCHS"]
+
+        opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index")
+
+        if not os.path.exists(os.path.join(self.deploy_dir, "Packages")):
+            open(os.path.join(self.deploy_dir, "Packages"), "w").close()
+
+        index_cmds = []
+        for arch_var in arch_vars:
+            archs = self.d.getVar(arch_var, True)
+            if archs is None:
+                continue
+
+            for arch in archs.split():
+                pkgs_dir = os.path.join(self.deploy_dir, arch)
+                pkgs_file = os.path.join(pkgs_dir, "Packages")
+
+                if not os.path.isdir(pkgs_dir):
+                    continue
+
+                if not os.path.exists(pkgs_file):
+                    open(pkgs_file, "w").close()
+
+                index_cmds.append('%s -r %s -p %s -m %s' %
+                                  (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir))
+
+        if len(index_cmds) == 0:
+            bb.note("There are no packages in %s!" % self.deploy_dir)
+            return
+
+        result = oe.utils.multiprocess_exec(index_cmds, create_index)
+        if result:
+            bb.fatal('%s' % ('\n'.join(result)))
+
+
+
+class DpkgIndexer(Indexer):
+    def _create_configs(self):
+        bb.utils.mkdirhier(self.apt_conf_dir)
+        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial"))
+        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d"))
+        bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d"))
+
+        with open(os.path.join(self.apt_conf_dir, "preferences"),
+                "w") as prefs_file:
+            pass
+        with open(os.path.join(self.apt_conf_dir, "sources.list"),
+                "w+") as sources_file:
+            pass
+
+        with open(self.apt_conf_file, "w") as apt_conf:
+            with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
+                "apt", "apt.conf.sample")) as apt_conf_sample:
+                for line in apt_conf_sample.read().split("\n"):
+                    line = re.sub("#ROOTFS#", "/dev/null", line)
+                    line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+                    apt_conf.write(line + "\n")
+
+    def write_index(self):
+        self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"),
+                "apt-ftparchive")
+        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+        self._create_configs()
+
+        os.environ['APT_CONFIG'] = self.apt_conf_file
+
+        pkg_archs = self.d.getVar('PACKAGE_ARCHS', True)
+        if pkg_archs is not None:
+            arch_list = pkg_archs.split()
+        sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS', True)
+        if sdk_pkg_archs is not None:
+            for a in sdk_pkg_archs.split():
+                if a not in pkg_archs:
+                    arch_list.append(a)
+
+        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+        arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list)
+
+        apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive")
+        gzip = bb.utils.which(os.getenv('PATH'), "gzip")
+
+        index_cmds = []
+        deb_dirs_found = False
+        for arch in arch_list:
+            arch_dir = os.path.join(self.deploy_dir, arch)
+            if not os.path.isdir(arch_dir):
+                continue
+
+            cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive)
+
+            cmd += "%s -fc Packages > Packages.gz;" % gzip
+
+            with open(os.path.join(arch_dir, "Release"), "w+") as release:
+                release.write("Label: %s\n" % arch)
+
+            cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive
+
+            index_cmds.append(cmd)
+
+            deb_dirs_found = True
+
+        if not deb_dirs_found:
+            bb.note("There are no packages in %s" % self.deploy_dir)
+            return
+
+        result = oe.utils.multiprocess_exec(index_cmds, create_index)
+        if result:
+            bb.fatal('%s' % ('\n'.join(result)))
+
+
+
+class PkgsList(object):
+    __metaclass__ = ABCMeta
+
+    def __init__(self, d, rootfs_dir):
+        self.d = d
+        self.rootfs_dir = rootfs_dir
+
+    @abstractmethod
+    def list(self, format=None):
+        pass
+
+
+class RpmPkgsList(PkgsList):
+    def __init__(self, d, rootfs_dir, arch_var=None, os_var=None):
+        super(RpmPkgsList, self).__init__(d, rootfs_dir)
+
+        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+        self.image_rpmlib = os.path.join(self.rootfs_dir, 'var/lib/rpm')
+
+        self.ml_prefix_list, self.ml_os_list = \
+            RpmIndexer(d, rootfs_dir).get_ml_prefix_and_os_list(arch_var, os_var)
+
+        # Determine rpm version
+        cmd = "%s --version" % self.rpm_cmd
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Getting rpm version failed. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+        self.rpm_version = int(output.split()[-1].split('.')[0])
+
+    def _pkg_translate_smart_to_oe(self, pkg, arch):
+        '''
+        Translate the RPM/Smart format names to the OE multilib format names.
+        '''
+        new_pkg = pkg
+        new_arch = arch
+        fixed_arch = arch.replace('_', '-')
+        found = 0
+        for mlib in self.ml_prefix_list:
+            for cmp_arch in self.ml_prefix_list[mlib]:
+                fixed_cmp_arch = cmp_arch.replace('_', '-')
+                if fixed_arch == fixed_cmp_arch:
+                    if mlib == 'default':
+                        new_pkg = pkg
+                        new_arch = cmp_arch
+                    else:
+                        new_pkg = mlib + '-' + pkg
+                        # We need to strip off the ${mlib}_ prefix on the arch
+                        new_arch = cmp_arch.replace(mlib + '_', '')
+
+                    # Workaround for bug 3565. Simply look to see if we
+                    # know of a package with that name; if not, try again!
+                    filename = os.path.join(self.d.getVar('PKGDATA_DIR', True),
+                                            'runtime-reverse',
+                                            new_pkg)
+                    if os.path.exists(filename):
+                        found = 1
+                        break
+
+            if found == 1 and fixed_arch == fixed_cmp_arch:
+                break
+        #bb.note('%s, %s -> %s, %s' % (pkg, arch, new_pkg, new_arch))
+        return new_pkg, new_arch
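+    # A hypothetical example of the translation above: with
+    # ml_prefix_list == {'lib32': ['lib32_x86'], 'default': ['core2_64']},
+    # pkg='glibc' and arch='lib32_x86' would come back as
+    # ('lib32-glibc', 'x86'), provided a 'lib32-glibc' entry exists under
+    # PKGDATA_DIR/runtime-reverse.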
+
+    def _list_pkg_deps(self):
+        cmd = [bb.utils.which(os.getenv('PATH'), "rpmresolve"),
+               "-t", self.image_rpmlib]
+
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Cannot get the package dependencies. Command '%s' "
+                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
+
+        return output
+
+    def list(self, format=None):
+        if format == "deps":
+            if self.rpm_version == 4:
+                bb.fatal("'deps' format dependency listings are not supported with rpm 4 since rpmresolve does not work")
+            return self._list_pkg_deps()
+
+        cmd = self.rpm_cmd + ' --root ' + self.rootfs_dir
+        cmd += ' -D "_dbpath /var/lib/rpm" -qa'
+        if self.rpm_version == 4:
+            cmd += " --qf '[%{NAME} %{ARCH} %{VERSION}\n]'"
+        else:
+            cmd += " --qf '[%{NAME} %{ARCH} %{VERSION} %{PACKAGEORIGIN}\n]'"
+
+        try:
+            # bb.note(cmd)
+            tmp_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Cannot get the installed packages list. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        output = list()
+        for line in tmp_output.split('\n'):
+            if len(line.strip()) == 0:
+                continue
+            pkg = line.split()[0]
+            arch = line.split()[1]
+            ver = line.split()[2]
+            # Skip GPG keys
+            if pkg == 'gpg-pubkey':
+                continue
+            if self.rpm_version == 4:
+                pkgorigin = "unknown"
+            else:
+                pkgorigin = line.split()[3]
+            new_pkg, new_arch = self._pkg_translate_smart_to_oe(pkg, arch)
+
+            if format == "arch":
+                output.append('%s %s' % (new_pkg, new_arch))
+            elif format == "file":
+                output.append('%s %s %s' % (new_pkg, pkgorigin, new_arch))
+            elif format == "ver":
+                output.append('%s %s %s' % (new_pkg, new_arch, ver))
+            else:
+                output.append(new_pkg)
+
+        output.sort()
+
+        return '\n'.join(output)
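+    # Sketch of the per-format lines produced by list() above (all values
+    # hypothetical):
+    #   "arch":  "bash core2_64"
+    #   "file":  "bash bash-4.3-r0.core2_64.rpm core2_64"
+    #   "ver":   "bash core2_64 4.3"
+    #   default: "bash"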
+
+
+class OpkgPkgsList(PkgsList):
+    def __init__(self, d, rootfs_dir, config_file):
+        super(OpkgPkgsList, self).__init__(d, rootfs_dir)
+
+        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+        self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir)
+        self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+
+    def list(self, format=None):
+        opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
+
+        if format == "arch":
+            cmd = "%s %s status | %s -a" % \
+                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
+        elif format == "file":
+            cmd = "%s %s status | %s -f" % \
+                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
+        elif format == "ver":
+            cmd = "%s %s status | %s -v" % \
+                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
+        elif format == "deps":
+            cmd = "%s %s status | %s" % \
+                (self.opkg_cmd, self.opkg_args, opkg_query_cmd)
+        else:
+            cmd = "%s %s list_installed | cut -d' ' -f1" % \
+                (self.opkg_cmd, self.opkg_args)
+
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Cannot get the installed packages list. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        if output and format == "file":
+            tmp_output = ""
+            for line in output.split('\n'):
+                pkg, pkg_file, pkg_arch = line.split()
+                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
+                if os.path.exists(full_path):
+                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
+                else:
+                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
+
+            output = tmp_output
+
+        return output
+
+
+class DpkgPkgsList(PkgsList):
+    def list(self, format=None):
+        cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"),
+               "--admindir=%s/var/lib/dpkg" % self.rootfs_dir,
+               "-W"]
+
+        if format == "arch":
+            cmd.append("-f=${Package} ${PackageArch}\n")
+        elif format == "file":
+            cmd.append("-f=${Package} ${Package}_${Version}_${Architecture}.deb ${PackageArch}\n")
+        elif format == "ver":
+            cmd.append("-f=${Package} ${PackageArch} ${Version}\n")
+        elif format == "deps":
+            cmd.append("-f=Package: ${Package}\nDepends: ${Depends}\nRecommends: ${Recommends}\n\n")
+        else:
+            cmd.append("-f=${Package}\n")
+
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip()
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Cannot get the installed packages list. Command '%s' "
+                     "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output))
+
+        if format == "file":
+            tmp_output = ""
+            for line in output.split('\n'):
+                if not line.strip():
+                    continue
+                pkg, pkg_file, pkg_arch = line.split()
+                full_path = os.path.join(self.rootfs_dir, pkg_arch, pkg_file)
+                if os.path.exists(full_path):
+                    tmp_output += "%s %s %s\n" % (pkg, full_path, pkg_arch)
+                else:
+                    tmp_output += "%s %s %s\n" % (pkg, pkg_file, pkg_arch)
+
+            output = tmp_output
+        elif format == "deps":
+            opkg_query_cmd = bb.utils.which(os.getenv('PATH'), "opkg-query-helper.py")
+            file_out = tempfile.NamedTemporaryFile()
+            file_out.write(output)
+            file_out.flush()
+
+            try:
+                output = subprocess.check_output("cat %s | %s" %
+                                                 (file_out.name, opkg_query_cmd),
+                                                 stderr=subprocess.STDOUT,
+                                                 shell=True)
+            except subprocess.CalledProcessError as e:
+                file_out.close()
+                bb.fatal("Cannot compute packages dependencies. Command '%s' "
+                         "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+            file_out.close()
+
+        return output
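+    # For example (hypothetical rootfs path), format == "arch" builds roughly:
+    #   dpkg-query --admindir=/path/to/rootfs/var/lib/dpkg -W \
+    #       -f='${Package} ${PackageArch}\n'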
+
+
+class PackageManager(object):
+    """
+    This is an abstract class. Do not instantiate this directly.
+    """
+    __metaclass__ = ABCMeta
+
+    def __init__(self, d):
+        self.d = d
+        self.deploy_dir = None
+        self.deploy_lock = None
+        self.feed_uris = self.d.getVar('PACKAGE_FEED_URIS', True) or ""
+        self.feed_prefix = self.d.getVar('PACKAGE_FEED_PREFIX', True) or ""
+
+    """
+    Update the package manager package database.
+    """
+    @abstractmethod
+    def update(self):
+        pass
+
+    """
+    Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
+    True, installation failures are ignored.
+    """
+    @abstractmethod
+    def install(self, pkgs, attempt_only=False):
+        pass
+
+    """
+    Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
+    is False, the any dependencies are left in place.
+    """
+    @abstractmethod
+    def remove(self, pkgs, with_dependencies=True):
+        pass
+
+    """
+    This function creates the index files
+    """
+    @abstractmethod
+    def write_index(self):
+        pass
+
+    @abstractmethod
+    def remove_packaging_data(self):
+        pass
+
+    @abstractmethod
+    def list_installed(self, format=None):
+        pass
+
+    @abstractmethod
+    def insert_feeds_uris(self):
+        pass
+
+    """
+    Install complementary packages based upon the list of currently installed
+    packages e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
+    these packages, if they don't exist then no error will occur.  Note: every
+    backend needs to call this function explicitly after the normal package
+    installation
+    """
+    def install_complementary(self, globs=None):
+        # we need to write the list of installed packages to a file because the
+        # oe-pkgdata-util reads it from a file
+        installed_pkgs_file = os.path.join(self.d.getVar('WORKDIR', True),
+                                           "installed_pkgs.txt")
+        with open(installed_pkgs_file, "w+") as installed_pkgs:
+            installed_pkgs.write(self.list_installed("arch"))
+
+        if globs is None:
+            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY', True) or ""
+            split_linguas = set()
+
+            for translation in (self.d.getVar('IMAGE_LINGUAS', True) or "").split():
+                split_linguas.add(translation)
+                split_linguas.add(translation.split('-')[0])
+
+            split_linguas = sorted(split_linguas)
+
+            for lang in split_linguas:
+                globs += " *-locale-%s" % lang
+
+        if not globs:
+            return
+
+        cmd = [bb.utils.which(os.getenv('PATH'), "oe-pkgdata-util"),
+               "-p", self.d.getVar('PKGDATA_DIR', True), "glob", installed_pkgs_file,
+               globs]
+        exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY', True)
+        if exclude:
+            cmd.extend(['-x', exclude])
+        try:
+            bb.note("Installing complementary packages ...")
+            complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Could not compute complementary packages list. Command "
+                     "'%s' returned %d:\n%s" %
+                     (' '.join(cmd), e.returncode, e.output))
+
+        self.install(complementary_pkgs.split(), attempt_only=True)
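+    # Worked example (hypothetical values): with
+    # IMAGE_INSTALL_COMPLEMENTARY = "*-dev" and IMAGE_LINGUAS = "en-us pt-br",
+    # globs becomes "*-dev *-locale-en *-locale-en-us *-locale-pt
+    # *-locale-pt-br", which oe-pkgdata-util then matches against the
+    # installed package list.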
+
+    def deploy_dir_lock(self):
+        if self.deploy_dir is None:
+            raise RuntimeError("deploy_dir is not set!")
+
+        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")
+
+        self.deploy_lock = bb.utils.lockfile(lock_file_name)
+
+    def deploy_dir_unlock(self):
+        if self.deploy_lock is None:
+            return
+
+        bb.utils.unlockfile(self.deploy_lock)
+
+        self.deploy_lock = None
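+    # Sketch of the intended lock usage in a backend (not prescriptive):
+    #   self.deploy_dir_lock()
+    #   try:
+    #       ...  # work on self.deploy_dir
+    #   finally:
+    #       self.deploy_dir_unlock()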
+
+
+class RpmPM(PackageManager):
+    def __init__(self,
+                 d,
+                 target_rootfs,
+                 target_vendor,
+                 task_name='target',
+                 providename=None,
+                 arch_var=None,
+                 os_var=None):
+        super(RpmPM, self).__init__(d)
+        self.target_rootfs = target_rootfs
+        self.target_vendor = target_vendor
+        self.task_name = task_name
+        self.providename = providename
+        self.fullpkglist = list()
+        self.deploy_dir = self.d.getVar('DEPLOY_DIR_RPM', True)
+        self.etcrpm_dir = os.path.join(self.target_rootfs, "etc/rpm")
+        self.install_dir_name = "oe_install"
+        self.install_dir_path = os.path.join(self.target_rootfs, self.install_dir_name)
+        self.rpm_cmd = bb.utils.which(os.getenv('PATH'), "rpm")
+        self.smart_cmd = bb.utils.which(os.getenv('PATH'), "smart")
+        self.smart_opt = "--log-level=warning --data-dir=" + os.path.join(target_rootfs,
+                                                      'var/lib/smart')
+        self.scriptlet_wrapper = self.d.expand('${WORKDIR}/scriptlet_wrapper')
+        self.solution_manifest = self.d.expand('${T}/saved/%s_solution' %
+                                               self.task_name)
+        self.saved_rpmlib = self.d.expand('${T}/saved/%s' % self.task_name)
+        self.image_rpmlib = os.path.join(self.target_rootfs, 'var/lib/rpm')
+
+        if not os.path.exists(self.d.expand('${T}/saved')):
+            bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+        self.indexer = RpmIndexer(self.d, self.deploy_dir)
+        self.pkgs_list = RpmPkgsList(self.d, self.target_rootfs, arch_var, os_var)
+        self.rpm_version = self.pkgs_list.rpm_version
+
+        self.ml_prefix_list, self.ml_os_list = self.indexer.get_ml_prefix_and_os_list(arch_var, os_var)
+
+    def insert_feeds_uris(self):
+        if self.feed_uris == "":
+            return
+
+        # List must be in most-preferred to least-preferred order
+        default_platform_extra = set()
+        platform_extra = set()
+        bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
+        for mlib in self.ml_os_list:
+            for arch in self.ml_prefix_list[mlib]:
+                plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
+                if mlib == bbextendvariant:
+                    default_platform_extra.add(plt)
+                else:
+                    platform_extra.add(plt)
+
+        platform_extra = platform_extra.union(default_platform_extra)
+
+        arch_list = []
+        for canonical_arch in platform_extra:
+            arch = canonical_arch.split('-')[0]
+            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+                continue
+            arch_list.append(arch)
+
+        uri_iterator = 0
+        channel_priority = 10 + 5 * len(self.feed_uris.split()) * len(arch_list)
+
+        for uri in self.feed_uris.split():
+            full_uri = uri
+            if self.feed_prefix:
+                full_uri = os.path.join(uri, self.feed_prefix)
+            for arch in arch_list:
+                bb.note('Note: adding Smart channel url%d-%s (%s)' %
+                        (uri_iterator, arch, channel_priority))
+                self._invoke_smart('channel --add url%d-%s type=rpm-md baseurl=%s/%s -y'
+                                   % (uri_iterator, arch, full_uri, arch))
+                self._invoke_smart('channel --set url%d-%s priority=%d' %
+                                   (uri_iterator, arch, channel_priority))
+                channel_priority -= 5
+            uri_iterator += 1
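+    # For a single feed URI and a single matching arch (hypothetical values),
+    # the two smart calls above amount to:
+    #   smart channel --add url0-core2_64 type=rpm-md \
+    #       baseurl=http://example.com/feed/core2_64 -y
+    #   smart channel --set url0-core2_64 priority=15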
+
+    def create_configs(self):
+        '''
+        Create the configs for rpm and smart; multilib is supported.
+        '''
+        target_arch = self.d.getVar('TARGET_ARCH', True)
+        platform = '%s%s-%s' % (target_arch.replace('-', '_'),
+                                self.target_vendor,
+                                self.ml_os_list['default'])
+
+        # List must be in most-preferred to least-preferred order
+        default_platform_extra = list()
+        platform_extra = list()
+        bbextendvariant = self.d.getVar('BBEXTENDVARIANT', True) or ""
+        for mlib in self.ml_os_list:
+            for arch in self.ml_prefix_list[mlib]:
+                plt = arch.replace('-', '_') + '-.*-' + self.ml_os_list[mlib]
+                if mlib == bbextendvariant:
+                    if plt not in default_platform_extra:
+                        default_platform_extra.append(plt)
+                else:
+                    if plt not in platform_extra:
+                        platform_extra.append(plt)
+        platform_extra = default_platform_extra + platform_extra
+
+        self._create_configs(platform, platform_extra)
+
+    def _invoke_smart(self, args):
+        cmd = "%s %s %s" % (self.smart_cmd, self.smart_opt, args)
+        # bb.note(cmd)
+        try:
+            complementary_pkgs = subprocess.check_output(cmd,
+                                                         stderr=subprocess.STDOUT,
+                                                         shell=True)
+            # bb.note(complementary_pkgs)
+            return complementary_pkgs
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Could not invoke smart. Command "
+                     "'%s' returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+    def _search_pkg_name_in_feeds(self, pkg, feed_archs):
+        for arch in feed_archs:
+            arch = arch.replace('-', '_')
+            regex_match = re.compile(r"^%s-[^-]*-[^-]*@%s$" % \
+                (re.escape(pkg), re.escape(arch)))
+            for p in self.fullpkglist:
+                if regex_match.match(p) is not None:
+                    # First found is best match
+                    # bb.note('%s -> %s' % (pkg, pkg + '@' + arch))
+                    return pkg + '@' + arch
+
+        # Search provides if not found by pkgname.
+        bb.note('Could not find %s by name, searching provides ...' % pkg)
+        cmd = "%s %s query --provides %s --show-format='$name-$version'" % \
+                (self.smart_cmd, self.smart_opt, pkg)
+        cmd += " | sed -ne 's/ *Provides://p'"
+        bb.note('cmd: %s' % cmd)
+        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+        # Found a provider
+        if output:
+            bb.note('Found providers for %s: %s' % (pkg, output))
+            for p in output.split():
+                for arch in feed_archs:
+                    arch = arch.replace('-', '_')
+                    if p.rstrip().endswith('@' + arch):
+                        return p
+
+        return ""
+
+    def _pkg_translate_oe_to_smart(self, pkgs, attempt_only=False):
+        '''
+        Translate the OE multilib format names to the RPM/Smart format names.
+        It searches the probable multilib feeds first, and then falls back to
+        the default base feed.
+        '''
+        new_pkgs = list()
+
+        for pkg in pkgs:
+            new_pkg = pkg
+            # Search new_pkg in probable multilibs first
+            for mlib in self.ml_prefix_list:
+                # Jump the default archs
+                if mlib == 'default':
+                    continue
+
+                subst = pkg.replace(mlib + '-', '')
+                # if the pkg in this multilib feed
+                if subst != pkg:
+                    feed_archs = self.ml_prefix_list[mlib]
+                    new_pkg = self._search_pkg_name_in_feeds(subst, feed_archs)
+                    if not new_pkg:
+                        # Failed to translate, package not found!
+                        err_msg = '%s not found in the %s feeds (%s).\n' % \
+                                  (pkg, mlib, " ".join(feed_archs))
+                        if not attempt_only:
+                            err_msg += " ".join(self.fullpkglist)
+                            bb.fatal(err_msg)
+                        bb.warn(err_msg)
+                    else:
+                        new_pkgs.append(new_pkg)
+
+                    break
+
+            # Apparently not a multilib package...
+            if pkg == new_pkg:
+                # Search new_pkg in default archs
+                default_archs = self.ml_prefix_list['default']
+                new_pkg = self._search_pkg_name_in_feeds(pkg, default_archs)
+                if not new_pkg:
+                    err_msg = '%s not found in the base feeds (%s).\n' % \
+                              (pkg, ' '.join(default_archs))
+                    if not attempt_only:
+                        err_msg += " ".join(self.fullpkglist)
+                        bb.fatal(err_msg)
+                    bb.warn(err_msg)
+                else:
+                    new_pkgs.append(new_pkg)
+
+        return new_pkgs
+
+    def _create_configs(self, platform, platform_extra):
+        # Setup base system configuration
+        bb.note("configuring RPM platform settings")
+
+        # Configure internal RPM environment when using Smart
+        os.environ['RPM_ETCRPM'] = self.etcrpm_dir
+        bb.utils.mkdirhier(self.etcrpm_dir)
+
+        # Setup temporary directory -- install...
+        if os.path.exists(self.install_dir_path):
+            bb.utils.remove(self.install_dir_path, True)
+        bb.utils.mkdirhier(os.path.join(self.install_dir_path, 'tmp'))
+
+        channel_priority = 5
+        platform_dir = os.path.join(self.etcrpm_dir, "platform")
+        sdkos = self.d.getVar("SDK_OS", True)
+        with open(platform_dir, "w+") as platform_fd:
+            platform_fd.write(platform + '\n')
+            for pt in platform_extra:
+                channel_priority += 5
+                tmp = pt
+                if sdkos:
+                    tmp = re.sub("-%s$" % sdkos, "-%s\n" % sdkos, tmp)
+                tmp = re.sub("-linux.*$", "-linux.*\n", tmp)
+                platform_fd.write(tmp)
+
+        # Tell RPM that the "/" directory exists and is available
+        bb.note("configuring RPM system provides")
+        sysinfo_dir = os.path.join(self.etcrpm_dir, "sysinfo")
+        bb.utils.mkdirhier(sysinfo_dir)
+        with open(os.path.join(sysinfo_dir, "Dirnames"), "w+") as dirnames:
+            dirnames.write("/\n")
+
+        if self.providename:
+            providename_dir = os.path.join(sysinfo_dir, "Providename")
+            if not os.path.exists(providename_dir):
+                providename_content = '\n'.join(self.providename)
+                providename_content += '\n'
+                with open(providename_dir, "w+") as providename_file:
+                    providename_file.write(providename_content)
+
+        # Configure RPM... we enforce these settings!
+        bb.note("configuring RPM DB settings")
+        # After changing the __db.* cache size, the log file is no longer
+        # generated automatically, which raises some warnings, so touch an
+        # empty log file for rpm to write into.
+        if self.rpm_version == 5:
+            rpmlib_log = os.path.join(self.image_rpmlib, 'log', 'log.0000000001')
+            if not os.path.exists(rpmlib_log):
+                bb.utils.mkdirhier(os.path.join(self.image_rpmlib, 'log'))
+                open(rpmlib_log, 'w+').close()
+
+            DB_CONFIG_CONTENT = "# ================ Environment\n" \
+                "set_data_dir .\n" \
+                "set_create_dir .\n" \
+                "set_lg_dir ./log\n" \
+                "set_tmp_dir ./tmp\n" \
+                "set_flags db_log_autoremove on\n" \
+                "\n" \
+                "# -- thread_count must be >= 8\n" \
+                "set_thread_count 64\n" \
+                "\n" \
+                "# ================ Logging\n" \
+                "\n" \
+                "# ================ Memory Pool\n" \
+                "set_cachesize 0 1048576 0\n" \
+                "set_mp_mmapsize 268435456\n" \
+                "\n" \
+                "# ================ Locking\n" \
+                "set_lk_max_locks 16384\n" \
+                "set_lk_max_lockers 16384\n" \
+                "set_lk_max_objects 16384\n" \
+                "mutex_set_max 163840\n" \
+                "\n" \
+                "# ================ Replication\n"
+
+            db_config_dir = os.path.join(self.image_rpmlib, 'DB_CONFIG')
+            if not os.path.exists(db_config_dir):
+                with open(db_config_dir, 'w+') as db_config_file:
+                    db_config_file.write(DB_CONFIG_CONTENT)
+
+        # Create database so that smart doesn't complain (lazy init)
+        opt = "-qa"
+        if self.rpm_version == 4:
+            opt = "--initdb"
+        cmd = "%s --root %s --dbpath /var/lib/rpm %s > /dev/null" % (
+              self.rpm_cmd, self.target_rootfs, opt)
+        try:
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Create rpm database failed. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+        # Import GPG key to RPM database of the target system
+        if self.d.getVar('RPM_SIGN_PACKAGES', True) == '1':
+            pubkey_path = self.d.getVar('RPM_GPG_PUBKEY', True)
+            cmd = "%s --root %s --dbpath /var/lib/rpm --import %s > /dev/null" % (
+                  self.rpm_cmd, self.target_rootfs, pubkey_path)
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+
+        # Configure smart
+        bb.note("configuring Smart settings")
+        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
+                        True)
+        self._invoke_smart('config --set rpm-root=%s' % self.target_rootfs)
+        self._invoke_smart('config --set rpm-dbpath=/var/lib/rpm')
+        self._invoke_smart('config --set rpm-extra-macros._var=%s' %
+                           self.d.getVar('localstatedir', True))
+        cmd = "config --set rpm-extra-macros._tmppath=/%s/tmp" % (self.install_dir_name)
+
+        prefer_color = self.d.getVar('RPM_PREFER_ELF_ARCH', True)
+        if prefer_color:
+            if prefer_color not in ['0', '1', '2', '4']:
+                bb.fatal("Invalid RPM_PREFER_ELF_ARCH: %s, it should be one of:\n"
+                        "\t1: ELF32 wins\n"
+                        "\t2: ELF64 wins\n"
+                        "\t4: ELF64 N32 wins (mips64 or mips64el only)" %
+                        prefer_color)
+            if prefer_color == "4" and self.d.getVar("TUNE_ARCH", True) not in \
+                                    ['mips64', 'mips64el']:
+                bb.fatal("RPM_PREFER_ELF_ARCH = \"4\" is for mips64 or mips64el "
+                         "only.")
+            self._invoke_smart('config --set rpm-extra-macros._prefer_color=%s'
+                        % prefer_color)
+
+        self._invoke_smart(cmd)
+        self._invoke_smart('config --set rpm-ignoresize=1')
+
+        # Write common configuration for host and target usage
+        self._invoke_smart('config --set rpm-nolinktos=1')
+        self._invoke_smart('config --set rpm-noparentdirs=1')
+        check_signature = self.d.getVar('RPM_CHECK_SIGNATURES', True)
+        if check_signature and check_signature.strip() == "0":
+            self._invoke_smart('config --set rpm-check-signatures=false')
+        for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
+            self._invoke_smart('flag --set ignore-recommends %s' % i)
+
+        # Do the following configurations here, to avoid them being
+        # saved for field upgrade
+        if self.d.getVar('NO_RECOMMENDATIONS', True).strip() == "1":
+            self._invoke_smart('config --set ignore-all-recommends=1')
+        pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+        for i in pkg_exclude.split():
+            self._invoke_smart('flag --set exclude-packages %s' % i)
+
+        # Optional debugging
+        # self._invoke_smart('config --set rpm-log-level=debug')
+        # cmd = 'config --set rpm-log-file=/tmp/smart-debug-logfile'
+        # self._invoke_smart(cmd)
+        ch_already_added = []
+        for canonical_arch in platform_extra:
+            arch = canonical_arch.split('-')[0]
+            arch_channel = os.path.join(self.deploy_dir, arch)
+            if os.path.exists(arch_channel) and arch not in ch_already_added:
+                bb.note('Note: adding Smart channel %s (%s)' %
+                        (arch, channel_priority))
+                self._invoke_smart('channel --add %s type=rpm-md baseurl=%s -y'
+                                   % (arch, arch_channel))
+                self._invoke_smart('channel --set %s priority=%d' %
+                                   (arch, channel_priority))
+                channel_priority -= 5
+
+                ch_already_added.append(arch)
+
+        bb.note('adding Smart RPM DB channel')
+        self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
+
+        # Construct the install scriptlet wrapper.
+        # Scripts need to be ordered when executed; this ensures numeric order.
+        # If we ever run into needing more than 899 scripts, we'll have to
+        # change num to start at 1000.
+        #
+        if self.rpm_version == 4:
+            scriptletcmd = "$2 $3 $4\n"
+            scriptpath = "$3"
+        else:
+            scriptletcmd = "$2 $1/$3 $4\n"
+            scriptpath = "$1/$3"
+
+        SCRIPTLET_FORMAT = "#!/bin/bash\n" \
+            "\n" \
+            "export PATH=%s\n" \
+            "export D=%s\n" \
+            'export OFFLINE_ROOT="$D"\n' \
+            'export IPKG_OFFLINE_ROOT="$D"\n' \
+            'export OPKG_OFFLINE_ROOT="$D"\n' \
+            "export INTERCEPT_DIR=%s\n" \
+            "export NATIVE_ROOT=%s\n" \
+            "\n" \
+            + scriptletcmd + \
+            "if [ $? -ne 0 ]; then\n" \
+            "  if [ $4 -eq 1 ]; then\n" \
+            "    mkdir -p $1/etc/rpm-postinsts\n" \
+            "    num=100\n" \
+            "    while [ -e $1/etc/rpm-postinsts/${num}-* ]; do num=$((num + 1)); done\n" \
+            "    name=`head -1 " + scriptpath + " | cut -d\' \' -f 2`\n" \
+            '    echo "#!$2" > $1/etc/rpm-postinsts/${num}-${name}\n' \
+            '    echo "# Arg: $4" >> $1/etc/rpm-postinsts/${num}-${name}\n' \
+            "    cat " + scriptpath + " >> $1/etc/rpm-postinsts/${num}-${name}\n" \
+            "    chmod +x $1/etc/rpm-postinsts/${num}-${name}\n" \
+            "  else\n" \
+            '    echo "Error: pre/post remove scriptlet failed"\n' \
+            "  fi\n" \
+            "fi\n"
+
+        intercept_dir = self.d.expand('${WORKDIR}/intercept_scripts')
+        native_root = self.d.getVar('STAGING_DIR_NATIVE', True)
+        scriptlet_content = SCRIPTLET_FORMAT % (os.environ['PATH'],
+                                                self.target_rootfs,
+                                                intercept_dir,
+                                                native_root)
+        with open(self.scriptlet_wrapper, 'w+') as scriptlet_file:
+            scriptlet_file.write(scriptlet_content)
+
+        bb.note("Note: configuring RPM cross-install scriptlet_wrapper")
+        os.chmod(self.scriptlet_wrapper, 0755)
+        cmd = 'config --set rpm-extra-macros._cross_scriptlet_wrapper=%s' % \
+              self.scriptlet_wrapper
+        self._invoke_smart(cmd)
+
+        # Debug to show smart config info
+        # bb.note(self._invoke_smart('config --show'))
+
+    def update(self):
+        self._invoke_smart('update rpmsys')
+
+    def install(self, pkgs, attempt_only=False):
+        '''
+        Install pkgs with smart; the package names are in OE format.
+        '''
+        if not pkgs:
+            bb.note("There are no packages to install")
+            return
+        bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+        pkgs = self._pkg_translate_oe_to_smart(pkgs, attempt_only)
+
+        if not attempt_only:
+            bb.note('to be installed: %s' % ' '.join(pkgs))
+            cmd = "%s %s install -y %s" % \
+                  (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
+            bb.note(cmd)
+        else:
+            bb.note('installing attempt only packages...')
+            bb.note('Attempting %s' % ' '.join(pkgs))
+            cmd = "%s %s install --attempt -y %s" % \
+                  (self.smart_cmd, self.smart_opt, ' '.join(pkgs))
+        try:
+            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+            bb.note(output)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Unable to install packages. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+    def remove(self, pkgs, with_dependencies=True):
+        '''
+        Remove pkgs with smart; the package names are in smart/rpm format.
+        '''
+        bb.note('to be removed: ' + ' '.join(pkgs))
+
+        if not with_dependencies:
+            cmd = "%s -e --nodeps " % self.rpm_cmd
+            cmd += "--root=%s " % self.target_rootfs
+            cmd += "--dbpath=/var/lib/rpm "
+            cmd += "--define='_cross_scriptlet_wrapper %s' " % \
+                   self.scriptlet_wrapper
+            cmd += "--define='_tmppath /%s/tmp' %s" % (self.install_dir_name, ' '.join(pkgs))
+        else:
+            # for pkg in pkgs:
+            #   bb.note('Debug: What required: %s' % pkg)
+            #   bb.note(self._invoke_smart('query %s --show-requiredby' % pkg))
+
+            cmd = "%s %s remove -y %s" % (self.smart_cmd,
+                                          self.smart_opt,
+                                          ' '.join(pkgs))
+
+        try:
+            bb.note(cmd)
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+            bb.note(output)
+        except subprocess.CalledProcessError as e:
+            bb.note("Unable to remove packages. Command '%s' "
+                    "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+    def upgrade(self):
+        bb.note('smart upgrade')
+        self._invoke_smart('upgrade')
+
+    def write_index(self):
+        result = self.indexer.write_index()
+
+        if result is not None:
+            bb.fatal(result)
+
+    def remove_packaging_data(self):
+        bb.utils.remove(self.image_rpmlib, True)
+        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
+                        True)
+        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/opkg'), True)
+
+        # remove temp directory
+        bb.utils.remove(self.install_dir_path, True)
+
+    def backup_packaging_data(self):
+        # Save the rpmlib for incremental rpm image generation
+        if os.path.exists(self.saved_rpmlib):
+            bb.utils.remove(self.saved_rpmlib, True)
+        shutil.copytree(self.image_rpmlib,
+                        self.saved_rpmlib,
+                        symlinks=True)
+
+    def recovery_packaging_data(self):
+        # Move the rpmlib back
+        if os.path.exists(self.saved_rpmlib):
+            if os.path.exists(self.image_rpmlib):
+                bb.utils.remove(self.image_rpmlib, True)
+
+            bb.note('Recovering packaging data')
+            shutil.copytree(self.saved_rpmlib,
+                            self.image_rpmlib,
+                            symlinks=True)
+
+    def list_installed(self, format=None):
+        return self.pkgs_list.list(format)
+
+    def dump_install_solution(self, pkgs):
+        '''
+        For an incremental install we need to determine what we already have,
+        what we need to add, and what to remove. This method dumps and saves
+        the new install solution.
+        '''
+        bb.note('creating new install solution for incremental install')
+        if len(pkgs) == 0:
+            return
+
+        pkgs = self._pkg_translate_oe_to_smart(pkgs, False)
+        install_pkgs = list()
+
+        cmd = "%s %s install -y --dump %s 2>%s" %  \
+              (self.smart_cmd,
+               self.smart_opt,
+               ' '.join(pkgs),
+               self.solution_manifest)
+        try:
+            # Disable rpmsys channel for the fake install
+            self._invoke_smart('channel --disable rpmsys')
+
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+            with open(self.solution_manifest, 'r') as manifest:
+                for pkg in manifest.read().split('\n'):
+                    if '@' in pkg:
+                        install_pkgs.append(pkg)
+        except subprocess.CalledProcessError as e:
+            bb.note("Unable to dump install packages. Command '%s' "
+                    "returned %d:\n%s" % (cmd, e.returncode, e.output))
+        # Recovery rpmsys channel
+        self._invoke_smart('channel --enable rpmsys')
+        return install_pkgs
+
+    def load_old_install_solution(self):
+        '''
+        For an incremental install we need to determine what we already have,
+        what we need to add, and what to remove. This method loads the
+        previous install solution.
+        '''
+        bb.note('loading old install solution for incremental install')
+        installed_pkgs = list()
+        if not os.path.exists(self.solution_manifest):
+            bb.note('old install solution does not exist')
+            return installed_pkgs
+
+        with open(self.solution_manifest, 'r') as manifest:
+            for pkg in manifest.read().split('\n'):
+                if '@' in pkg:
+                    installed_pkgs.append(pkg.strip())
+
+        return installed_pkgs
+
+    def dump_all_available_pkgs(self):
+        '''
+        Dump all available packages in the feeds; invoke this after the newest
+        rpm index has been created.
+        '''
+        available_manifest = self.d.expand('${T}/saved/available_pkgs.txt')
+        available_pkgs = list()
+        cmd = "%s %s query --output %s" %  \
+              (self.smart_cmd, self.smart_opt, available_manifest)
+        try:
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+            with open(available_manifest, 'r') as manifest:
+                for pkg in manifest.read().split('\n'):
+                    if '@' in pkg:
+                        available_pkgs.append(pkg.strip())
+        except subprocess.CalledProcessError as e:
+            bb.note("Unable to list all available packages. Command '%s' "
+                    "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        self.fullpkglist = available_pkgs
+
+        return
+
+    def save_rpmpostinst(self, pkg):
+        mlibs = (self.d.getVar('MULTILIB_GLOBAL_VARIANTS', False) or "").split()
+
+        new_pkg = pkg
+        # Remove any multilib prefix from the package name
+        for mlib in mlibs:
+            if mlib in pkg:
+                new_pkg = pkg.replace(mlib + '-', '')
+                break
+
+        bb.note('  * postponing %s' % new_pkg)
+        saved_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + new_pkg
+
+        cmd = self.rpm_cmd + ' -q --scripts --root ' + self.target_rootfs
+        cmd += ' --dbpath=/var/lib/rpm ' + new_pkg
+        cmd += ' | sed -n -e "/^postinstall scriptlet (using .*):$/,/^.* scriptlet (using .*):$/ {/.*/p}"'
+        cmd += ' | sed -e "/postinstall scriptlet (using \(.*\)):$/d"'
+        cmd += ' -e "/^.* scriptlet (using .*):$/d" > %s' % saved_dir
+
+        try:
+            bb.note(cmd)
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).strip()
+            bb.note(output)
+            os.chmod(saved_dir, 0755)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Invoke save_rpmpostinst failed. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+    def rpm_setup_smart_target_config(self):
+        '''Write common configuration for target usage.'''
+        bb.utils.remove(os.path.join(self.target_rootfs, 'var/lib/smart'),
+                        True)
+
+        self._invoke_smart('config --set rpm-nolinktos=1')
+        self._invoke_smart('config --set rpm-noparentdirs=1')
+        for i in self.d.getVar('BAD_RECOMMENDATIONS', True).split():
+            self._invoke_smart('flag --set ignore-recommends %s' % i)
+        self._invoke_smart('channel --add rpmsys type=rpm-sys -y')
+
+    def unlock_rpm_db(self):
+        '''
+        The rpm db lock files are produced when rpm is invoked to query the
+        image on the build system, and they prevent rpm from working on the
+        target, so unlock the rpm db by removing the lock files.
+        '''
+        # Remove rpm db lock files
+        rpm_db_locks = glob.glob('%s/var/lib/rpm/__db.*' % self.target_rootfs)
+        for f in rpm_db_locks:
+            bb.utils.remove(f, True)
+
+
+class OpkgPM(PackageManager):
+    def __init__(self, d, target_rootfs, config_file, archs, task_name='target'):
+        super(OpkgPM, self).__init__(d)
+
+        self.target_rootfs = target_rootfs
+        self.config_file = config_file
+        self.pkg_archs = archs
+        self.task_name = task_name
+
+        self.deploy_dir = self.d.getVar("DEPLOY_DIR_IPK", True)
+        self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock")
+        self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg")
+        self.opkg_args = "--volatile-cache -f %s -o %s " % (self.config_file, target_rootfs)
+        self.opkg_args += self.d.getVar("OPKG_ARGS", True)
+
+        opkg_lib_dir = self.d.getVar('OPKGLIBDIR', True)
+        if opkg_lib_dir[0] == "/":
+            opkg_lib_dir = opkg_lib_dir[1:]
+
+        self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg")
+
+        bb.utils.mkdirhier(self.opkg_dir)
+
+        self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name)
+        if not os.path.exists(self.d.expand('${T}/saved')):
+            bb.utils.mkdirhier(self.d.expand('${T}/saved'))
+
+        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+            self._create_config()
+        else:
+            self._create_custom_config()
+
+        self.indexer = OpkgIndexer(self.d, self.deploy_dir)
+
+    """
+    This function will change a package's status in /var/lib/opkg/status file.
+    If 'packages' is None then the new_status will be applied to all
+    packages
+    """
+    def mark_packages(self, status_tag, packages=None):
+        status_file = os.path.join(self.opkg_dir, "status")
+
+        with open(status_file, "r") as sf:
+            with open(status_file + ".tmp", "w+") as tmp_sf:
+                if packages is None:
+                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+                                        r"Package: \1\n\2Status: \3%s" % status_tag,
+                                        sf.read()))
+                else:
+                    if type(packages).__name__ != "list":
+                        raise TypeError("'packages' should be a list object")
+
+                    status = sf.read()
+                    for pkg in packages:
+                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+                                        status)
+
+                    tmp_sf.write(status)
+
+        os.rename(status_file + ".tmp", status_file)
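+    # Example effect (hypothetical status entry): mark_packages("installed")
+    # would rewrite
+    #   Status: install ok unpacked
+    # as
+    #   Status: install ok installed
+    # since the trailing 'unpacked'/'installed' token is replaced by the
+    # status_tag argument.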
+
+    def _create_custom_config(self):
+        bb.note("Building from feeds activated!")
+
+        with open(self.config_file, "w+") as config_file:
+            priority = 1
+            for arch in self.pkg_archs.split():
+                config_file.write("arch %s %d\n" % (arch, priority))
+                priority += 5
+
+            for line in (self.d.getVar('IPK_FEED_URIS', True) or "").split():
+                feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+
+                if feed_match is not None:
+                    feed_name = feed_match.group(1)
+                    feed_uri = feed_match.group(2)
+
+                    bb.note("Add %s feed with URL %s" % (feed_name, feed_uri))
+
+                    config_file.write("src/gz %s %s\n" % (feed_name, feed_uri))
+
+            """
+            Allow to use package deploy directory contents as quick devel-testing
+            feed. This creates individual feed configs for each arch subdir of those
+            specified as compatible for the current machine.
+            NOTE: Development-helper feature, NOT a full-fledged feed.
+            """
+            if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True) or "") != "":
+                for arch in self.pkg_archs.split():
+                    cfg_file_name = os.path.join(self.target_rootfs,
+                                                 self.d.getVar("sysconfdir", True),
+                                                 "opkg",
+                                                 "local-%s-feed.conf" % arch)
+
+                    with open(cfg_file_name, "w+") as cfg_file:
+                        cfg_file.write("src/gz local-%s %s/%s" %
+                                       (arch,
+                                        self.d.getVar('FEED_DEPLOYDIR_BASE_URI', True),
+                                        arch))
+
+    def _create_config(self):
+        with open(self.config_file, "w+") as config_file:
+            priority = 1
+            for arch in self.pkg_archs.split():
+                config_file.write("arch %s %d\n" % (arch, priority))
+                priority += 5
+
+            config_file.write("src oe file:%s\n" % self.deploy_dir)
+
+            for arch in self.pkg_archs.split():
+                pkgs_dir = os.path.join(self.deploy_dir, arch)
+                if os.path.isdir(pkgs_dir):
+                    config_file.write("src oe-%s file:%s\n" %
+                                      (arch, pkgs_dir))
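+    # The generated config might look like this (hypothetical archs/paths):
+    #   arch all 1
+    #   arch core2-64 6
+    #   src oe file:/path/to/deploy/ipk
+    #   src oe-core2-64 file:/path/to/deploy/ipk/core2-64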
+
+    def insert_feeds_uris(self):
+        if self.feed_uris == "":
+            return
+
+        rootfs_config = os.path.join(self.target_rootfs,
+                                     'etc/opkg/base-feeds.conf')
+
+        with open(rootfs_config, "w+") as config_file:
+            uri_iterator = 0
+            for uri in self.feed_uris.split():
+                full_uri = uri
+                if self.feed_prefix:
+                    full_uri = os.path.join(uri, self.feed_prefix)
+
+                for arch in self.pkg_archs.split():
+                    if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+                        continue
+                    bb.note('Note: adding opkg feed url-%s-%d (%s)' %
+                        (arch, uri_iterator, full_uri))
+
+                    config_file.write("src/gz uri-%s-%d %s/%s\n" %
+                                      (arch, uri_iterator, full_uri, arch))
+                uri_iterator += 1
+
+    def update(self):
+        self.deploy_dir_lock()
+
+        cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args)
+
+        try:
+            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            self.deploy_dir_unlock()
+            bb.fatal("Unable to update the package index files. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        self.deploy_dir_unlock()
+
+    def install(self, pkgs, attempt_only=False):
+        if attempt_only and len(pkgs) == 0:
+            return
+
+        cmd = "%s %s install %s" % (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+        os.environ['D'] = self.target_rootfs
+        os.environ['OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
+                                                   "intercept_scripts")
+        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+
+        try:
+            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+            bb.note(cmd)
+            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+            bb.note(output)
+        except subprocess.CalledProcessError as e:
+            (bb.fatal, bb.note)[attempt_only]("Unable to install packages. "
+                                              "Command '%s' returned %d:\n%s" %
+                                              (cmd, e.returncode, e.output))
+
+    def remove(self, pkgs, with_dependencies=True):
+        if with_dependencies:
+            cmd = "%s %s --force-depends --force-remove --force-removal-of-dependent-packages remove %s" % \
+                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+        else:
+            cmd = "%s %s --force-depends remove %s" % \
+                (self.opkg_cmd, self.opkg_args, ' '.join(pkgs))
+
+        try:
+            bb.note(cmd)
+            output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+            bb.note(output)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Unable to remove packages. Command '%s' "
+                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+    def write_index(self):
+        self.deploy_dir_lock()
+
+        result = self.indexer.write_index()
+
+        self.deploy_dir_unlock()
+
+        if result is not None:
+            bb.fatal(result)
+
+    def remove_packaging_data(self):
+        bb.utils.remove(self.opkg_dir, True)
+        # recreate the directory; it is needed by the PM lock
+        bb.utils.mkdirhier(self.opkg_dir)
+
+    def list_installed(self, format=None):
+        return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list(format)
+
+    def handle_bad_recommendations(self):
+        bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS", True) or ""
+        if bad_recommendations.strip() == "":
+            return
+
+        status_file = os.path.join(self.opkg_dir, "status")
+
+        # If the status file already exists, the bad recommendations have
+        # already been handled
+        if os.path.exists(status_file):
+            return
+
+        cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
+
+        with open(status_file, "w+") as status:
+            for pkg in bad_recommendations.split():
+                pkg_info = cmd + pkg
+
+                try:
+                    output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip()
+                except subprocess.CalledProcessError as e:
+                    bb.fatal("Cannot get package info. Command '%s' "
+                             "returned %d:\n%s" % (pkg_info, e.returncode, e.output))
+
+                if output == "":
+                    bb.note("Ignored bad recommendation: '%s' is "
+                            "not a package" % pkg)
+                    continue
+
+                for line in output.split('\n'):
+                    if line.startswith("Status:"):
+                        status.write("Status: deinstall hold not-installed\n")
+                    else:
+                        status.write(line + "\n")
+
+                # Append a blank line after each package entry to ensure that it
+                # is separated from the following entry
+                status.write("\n")
+
+    def dummy_install(self, pkgs):
+        '''
+        Dummy install pkgs and return the log output.
+        '''
+        if len(pkgs) == 0:
+            return
+
+        # Create a temp dir as the opkg root for the dummy installation
+        temp_rootfs = self.d.expand('${T}/opkg')
+        temp_opkg_dir = os.path.join(temp_rootfs, 'var/lib/opkg')
+        bb.utils.mkdirhier(temp_opkg_dir)
+
+        opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs)
+        opkg_args += self.d.getVar("OPKG_ARGS", True)
+
+        cmd = "%s %s update" % (self.opkg_cmd, opkg_args)
+        try:
+            subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Unable to update. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        # Dummy installation
+        cmd = "%s %s --noaction install %s " % (self.opkg_cmd,
+                                                opkg_args,
+                                                ' '.join(pkgs))
+        try:
+            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Unable to dummy install packages. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+        bb.utils.remove(temp_rootfs, True)
+
+        return output
+
+    def backup_packaging_data(self):
+        # Save the opkglib for incremental ipk image generation
+        if os.path.exists(self.saved_opkg_dir):
+            bb.utils.remove(self.saved_opkg_dir, True)
+        shutil.copytree(self.opkg_dir,
+                        self.saved_opkg_dir,
+                        symlinks=True)
+
+    def recover_packaging_data(self):
+        # Move the opkglib back
+        if os.path.exists(self.saved_opkg_dir):
+            if os.path.exists(self.opkg_dir):
+                bb.utils.remove(self.opkg_dir, True)
+
+            bb.note('Recovering packaging data')
+            shutil.copytree(self.saved_opkg_dir,
+                            self.opkg_dir,
+                            symlinks=True)
+
+
+class DpkgPM(PackageManager):
+    def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None):
+        super(DpkgPM, self).__init__(d)
+        self.target_rootfs = target_rootfs
+        self.deploy_dir = self.d.getVar('DEPLOY_DIR_DEB', True)
+        if apt_conf_dir is None:
+            self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt")
+        else:
+            self.apt_conf_dir = apt_conf_dir
+        self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf")
+        self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get")
+
+        self.apt_args = d.getVar("APT_ARGS", True)
+
+        self.all_arch_list = archs.split()
+        all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS', True) or "").replace('-', '_').split()
+        self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list)
+
+        self._create_configs(archs, base_archs)
+
+        self.indexer = DpkgIndexer(self.d, self.deploy_dir)
+
+    """
+    This function will change a package's status in /var/lib/dpkg/status file.
+    If 'packages' is None then the new_status will be applied to all
+    packages
+    """
+    def mark_packages(self, status_tag, packages=None):
+        status_file = self.target_rootfs + "/var/lib/dpkg/status"
+
+        with open(status_file, "r") as sf:
+            with open(status_file + ".tmp", "w+") as tmp_sf:
+                if packages is None:
+                    tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
+                                        r"Package: \1\n\2Status: \3%s" % status_tag,
+                                        sf.read()))
+                else:
+                    if type(packages).__name__ != "list":
+                        raise TypeError("'packages' should be a list object")
+
+                    status = sf.read()
+                    for pkg in packages:
+                        status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg,
+                                        r"Package: %s\n\1Status: \2%s" % (pkg, status_tag),
+                                        status)
+
+                    tmp_sf.write(status)
+
+        os.rename(status_file + ".tmp", status_file)
+
+    """
+    Run the pre/post installs for package "package_name". If package_name is
+    None, then run all pre/post install scriptlets.
+    """
+    def run_pre_post_installs(self, package_name=None):
+        info_dir = self.target_rootfs + "/var/lib/dpkg/info"
+        suffixes = [(".preinst", "Preinstall"), (".postinst", "Postinstall")]
+        status_file = self.target_rootfs + "/var/lib/dpkg/status"
+        installed_pkgs = []
+
+        with open(status_file, "r") as status:
+            for line in status.read().split('\n'):
+                m = re.match("^Package: (.*)", line)
+                if m is not None:
+                    installed_pkgs.append(m.group(1))
+
+        if package_name is not None and package_name not in installed_pkgs:
+            return
+
+        os.environ['D'] = self.target_rootfs
+        os.environ['OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs
+        os.environ['INTERCEPT_DIR'] = os.path.join(self.d.getVar('WORKDIR', True),
+                                                   "intercept_scripts")
+        os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE', True)
+
+        failed_pkgs = []
+        for pkg_name in installed_pkgs:
+            for suffix in suffixes:
+                p_full = os.path.join(info_dir, pkg_name + suffix[0])
+                if os.path.exists(p_full):
+                    try:
+                        bb.note("Executing %s for package: %s ..." %
+                                 (suffix[1].lower(), pkg_name))
+                        subprocess.check_output(p_full, stderr=subprocess.STDOUT)
+                    except subprocess.CalledProcessError as e:
+                        bb.note("%s for package %s failed with %d:\n%s" %
+                                (suffix[1], pkg_name, e.returncode, e.output))
+                        failed_pkgs.append(pkg_name)
+                        break
+
+        if failed_pkgs:
+            self.mark_packages("unpacked", failed_pkgs)
+
+    def update(self):
+        os.environ['APT_CONFIG'] = self.apt_conf_file
+
+        self.deploy_dir_lock()
+
+        cmd = "%s update" % self.apt_get_cmd
+
+        try:
+            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Unable to update the package index files. Command '%s' "
+                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+        self.deploy_dir_unlock()
+
+    def install(self, pkgs, attempt_only=False):
+        if attempt_only and not pkgs:
+            return
+
+        os.environ['APT_CONFIG'] = self.apt_conf_file
+
+        cmd = "%s %s install --force-yes --allow-unauthenticated %s" % \
+              (self.apt_get_cmd, self.apt_args, ' '.join(pkgs))
+
+        try:
+            bb.note("Installing the following packages: %s" % ' '.join(pkgs))
+            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            (bb.note if attempt_only else bb.fatal)(
+                "Unable to install packages. Command '%s' returned %d:\n%s" %
+                (cmd, e.returncode, e.output))
+
+        # rename *.dpkg-new files/dirs
+        for root, dirs, files in os.walk(self.target_rootfs):
+            for dir_name in dirs:
+                new_dir = re.sub(r"\.dpkg-new", "", dir_name)
+                if dir_name != new_dir:
+                    os.rename(os.path.join(root, dir_name),
+                              os.path.join(root, new_dir))
+
+            for file_name in files:
+                new_file = re.sub(r"\.dpkg-new", "", file_name)
+                if file_name != new_file:
+                    os.rename(os.path.join(root, file_name),
+                              os.path.join(root, new_file))
+
+
+    def remove(self, pkgs, with_dependencies=True):
+        if with_dependencies:
+            os.environ['APT_CONFIG'] = self.apt_conf_file
+            cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs))
+        else:
+            cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \
+                  " -P --force-depends %s" % \
+                  (bb.utils.which(os.getenv('PATH'), "dpkg"),
+                   self.target_rootfs, self.target_rootfs, ' '.join(pkgs))
+
+        try:
+            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Unable to remove packages. Command '%s' "
+                     "returned %d:\n%s" % (e.cmd, e.returncode, e.output))
+
+    def write_index(self):
+        self.deploy_dir_lock()
+
+        result = self.indexer.write_index()
+
+        self.deploy_dir_unlock()
+
+        if result is not None:
+            bb.fatal(result)
+
+    def insert_feeds_uris(self):
+        if self.feed_uris == "":
+            return
+
+        sources_conf = os.path.join(self.target_rootfs,
+                                    "etc/apt/sources.list")
+        arch_list = []
+
+        for arch in self.all_arch_list:
+            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+                continue
+            arch_list.append(arch)
+
+        with open(sources_conf, "w+") as sources_file:
+            for uri in self.feed_uris.split():
+                full_uri = uri
+                if self.feed_prefix:
+                    full_uri = os.path.join(uri, self.feed_prefix)
+                for arch in arch_list:
+                    bb.note('Adding dpkg channel at (%s)' % uri)
+                    sources_file.write("deb %s/%s ./\n" %
+                                       (full_uri, arch))
+
+    def _create_configs(self, archs, base_archs):
+        base_archs = re.sub("_", "-", base_archs)
+
+        if os.path.exists(self.apt_conf_dir):
+            bb.utils.remove(self.apt_conf_dir, True)
+
+        bb.utils.mkdirhier(self.apt_conf_dir)
+        bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/")
+        bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/")
+
+        arch_list = []
+        for arch in self.all_arch_list:
+            if not os.path.exists(os.path.join(self.deploy_dir, arch)):
+                continue
+            arch_list.append(arch)
+
+        with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file:
+            priority = 801
+            for arch in arch_list:
+                prefs_file.write(
+                    "Package: *\n"
+                    "Pin: release l=%s\n"
+                    "Pin-Priority: %d\n\n" % (arch, priority))
+
+                priority += 5
+
+            pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE', True) or ""
+            for pkg in pkg_exclude.split():
+                prefs_file.write(
+                    "Package: %s\n"
+                    "Pin: release *\n"
+                    "Pin-Priority: -1\n\n" % pkg)
+
+        arch_list.reverse()
+
+        with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file:
+            for arch in arch_list:
+                sources_file.write("deb file:%s/ ./\n" %
+                                   os.path.join(self.deploy_dir, arch))
+
+        base_arch_list = base_archs.split()
+        multilib_variants = self.d.getVar("MULTILIB_VARIANTS", True);
+        for variant in multilib_variants.split():
+            if variant == "lib32":
+                base_arch_list.append("i386")
+            elif variant == "lib64":
+                base_arch_list.append("amd64")
+
+        with open(self.apt_conf_file, "w+") as apt_conf:
+            with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
+                for line in apt_conf_sample.read().split("\n"):
+                    match_arch = re.match("  Architecture \".*\";$", line)
+                    architectures = ""
+                    if match_arch:
+                        for base_arch in base_arch_list:
+                            architectures += "\"%s\";" % base_arch
+                        apt_conf.write("  Architectures {%s};\n" % architectures);
+                        apt_conf.write("  Architecture \"%s\";\n" % base_archs)
+                    else:
+                        line = re.sub("#ROOTFS#", self.target_rootfs, line)
+                        line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+                        apt_conf.write(line + "\n")
+
+        target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs
+        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info"))
+
+        bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates"))
+
+        if not os.path.exists(os.path.join(target_dpkg_dir, "status")):
+            open(os.path.join(target_dpkg_dir, "status"), "w+").close()
+        if not os.path.exists(os.path.join(target_dpkg_dir, "available")):
+            open(os.path.join(target_dpkg_dir, "available"), "w+").close()
+
+    def remove_packaging_data(self):
+        bb.utils.remove(os.path.join(self.target_rootfs,
+                                     self.d.getVar('opkglibdir', True)), True)
+        bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True)
+
+    def fix_broken_dependencies(self):
+        os.environ['APT_CONFIG'] = self.apt_conf_file
+
+        cmd = "%s %s -f install" % (self.apt_get_cmd, self.apt_args)
+
+        try:
+            subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            bb.fatal("Cannot fix broken dependencies. Command '%s' "
+                     "returned %d:\n%s" % (cmd, e.returncode, e.output))
+
+    def list_installed(self, format=None):
+        return DpkgPkgsList(self.d, self.target_rootfs).list()
+
+
+def generate_index_files(d):
+    classes = d.getVar('PACKAGE_CLASSES', True).replace("package_", "").split()
+
+    indexer_map = {
+        "rpm": (RpmIndexer, d.getVar('DEPLOY_DIR_RPM', True)),
+        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK', True)),
+        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB', True))
+    }
+
+    result = None
+
+    for pkg_class in classes:
+        if not pkg_class in indexer_map:
+            continue
+
+        if os.path.exists(indexer_map[pkg_class][1]):
+            result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()
+
+            if result is not None:
+                bb.fatal(result)
+
+if __name__ == "__main__":
+    """
+    We should be able to run this as a standalone script, from outside bitbake
+    environment.
+    """
+    """
+    TBD
+    """
diff --git a/meta/lib/oe/packagedata.py b/meta/lib/oe/packagedata.py
new file mode 100644
index 0000000..cd5f044
--- /dev/null
+++ b/meta/lib/oe/packagedata.py
@@ -0,0 +1,94 @@
+import codecs
+
+def packaged(pkg, d):
+    return os.access(get_subpkgedata_fn(pkg, d) + '.packaged', os.R_OK)
+
+def read_pkgdatafile(fn):
+    pkgdata = {}
+
+    def decode(s):
+        c = codecs.getdecoder("string_escape")
+        return c(s)[0]
+
+    if os.access(fn, os.R_OK):
+        import re
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        r = re.compile(r"([^:]+):\s*(.*)")
+        for l in lines:
+            m = r.match(l)
+            if m:
+                pkgdata[m.group(1)] = decode(m.group(2))
+
+    return pkgdata
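+# Example (hypothetical file contents): a pkgdata file containing
+#   PN: foo
+#   PACKAGES: foo foo-doc
+# parses to {'PN': 'foo', 'PACKAGES': 'foo foo-doc'}.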
+
+def get_subpkgedata_fn(pkg, d):
+    return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)
+
+def has_subpkgdata(pkg, d):
+    return os.access(get_subpkgedata_fn(pkg, d), os.R_OK)
+
+def read_subpkgdata(pkg, d):
+    return read_pkgdatafile(get_subpkgedata_fn(pkg, d))
+
+def has_pkgdata(pn, d):
+    fn = d.expand('${PKGDATA_DIR}/%s' % pn)
+    return os.access(fn, os.R_OK)
+
+def read_pkgdata(pn, d):
+    fn = d.expand('${PKGDATA_DIR}/%s' % pn)
+    return read_pkgdatafile(fn)
+
+#
+# Collapse FOO_pkg variables into FOO
+#
+def read_subpkgdata_dict(pkg, d):
+    ret = {}
+    subd = read_pkgdatafile(get_subpkgedata_fn(pkg, d))
+    for var in subd:
+        newvar = var.replace("_" + pkg, "")
+        if newvar == var and var + "_" + pkg in subd:
+            continue
+        ret[newvar] = subd[var]
+    return ret
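+# For example (hypothetical data): for pkg "foo", {'PKGSIZE_foo': '1024'}
+# collapses to {'PKGSIZE': '1024'}; a plain 'PKGSIZE' key is skipped when a
+# 'PKGSIZE_foo' override is also present.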
+
+def _pkgmap(d):
+    """Return a dictionary mapping package to recipe name."""
+
+    pkgdatadir = d.getVar("PKGDATA_DIR", True)
+
+    pkgmap = {}
+    try:
+        files = os.listdir(pkgdatadir)
+    except OSError:
+        bb.warn("No files in %s?" % pkgdatadir)
+        files = []
+
+    for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
+        try:
+            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
+        except OSError:
+            continue
+
+        packages = pkgdata.get("PACKAGES") or ""
+        for pkg in packages.split():
+            pkgmap[pkg] = pn
+
+    return pkgmap
+
+def pkgmap(d):
+    """Return a dictionary mapping package to recipe name.
+    Cache the mapping in the metadata"""
+
+    pkgmap_data = d.getVar("__pkgmap_data", False)
+    if pkgmap_data is None:
+        pkgmap_data = _pkgmap(d)
+        d.setVar("__pkgmap_data", pkgmap_data)
+
+    return pkgmap_data
+
+def recipename(pkg, d):
+    """Return the recipe name for the given binary package name."""
+
+    return pkgmap(d).get(pkg)
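+# Example (hypothetical pkgdata): if ${PKGDATA_DIR}/busybox contains
+#   PACKAGES: busybox busybox-syslog
+# then recipename("busybox-syslog", d) returns "busybox".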
diff --git a/meta/lib/oe/packagegroup.py b/meta/lib/oe/packagegroup.py
new file mode 100644
index 0000000..12eb421
--- /dev/null
+++ b/meta/lib/oe/packagegroup.py
@@ -0,0 +1,36 @@
+import itertools
+
+def is_optional(feature, d):
+    packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+    if packages:
+        return bool(d.getVarFlag("FEATURE_PACKAGES_%s" % feature, "optional"))
+    else:
+        return bool(d.getVarFlag("PACKAGE_GROUP_%s" % feature, "optional"))
+
+def packages(features, d):
+    for feature in features:
+        packages = d.getVar("FEATURE_PACKAGES_%s" % feature, True)
+        if not packages:
+            packages = d.getVar("PACKAGE_GROUP_%s" % feature, True)
+        for pkg in (packages or "").split():
+            yield pkg
+
+def required_packages(features, d):
+    req = filter(lambda feature: not is_optional(feature, d), features)
+    return packages(req, d)
+
+def optional_packages(features, d):
+    opt = filter(lambda feature: is_optional(feature, d), features)
+    return packages(opt, d)
+
+def active_packages(features, d):
+    return itertools.chain(required_packages(features, d),
+                           optional_packages(features, d))
+
+def active_recipes(features, d):
+    import oe.packagedata
+
+    for pkg in active_packages(features, d):
+        recipe = oe.packagedata.recipename(pkg, d)
+        if recipe:
+            yield recipe
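+# Usage sketch (hypothetical variables): with
+#   FEATURE_PACKAGES_tools = "pkg-a pkg-b"
+# set in the datastore, list(packages(["tools"], d)) yields
+# ["pkg-a", "pkg-b"]; active_packages() chains the required and optional
+# feature packages together.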
diff --git a/meta/lib/oe/patch.py b/meta/lib/oe/patch.py
new file mode 100644
index 0000000..108bf1d
--- /dev/null
+++ b/meta/lib/oe/patch.py
@@ -0,0 +1,659 @@
+import oe.path
+
+class NotFoundError(bb.BBHandledException):
+    def __init__(self, path):
+        self.path = path
+
+    def __str__(self):
+        return "Error: %s not found." % self.path
+
+class CmdError(bb.BBHandledException):
+    def __init__(self, exitstatus, output):
+        self.status = exitstatus
+        self.output = output
+
+    def __str__(self):
+        return "Command Error: exit status: %d  Output:\n%s" % (self.status, self.output)
+
+
+def runcmd(args, dir = None):
+    import pipes
+
+    if dir:
+        olddir = os.path.abspath(os.curdir)
+        if not os.path.exists(dir):
+            raise NotFoundError(dir)
+        os.chdir(dir)
+        # print("cwd: %s -> %s" % (olddir, dir))
+
+    try:
+        args = [ pipes.quote(str(arg)) for arg in args ]
+        cmd = " ".join(args)
+        # print("cmd: %s" % cmd)
+        (exitstatus, output) = oe.utils.getstatusoutput(cmd)
+        if exitstatus != 0:
+            raise CmdError(exitstatus >> 8, output)
+        return output
+
+    finally:
+        if dir:
+            os.chdir(olddir)
+
+class PatchError(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+
+    def __str__(self):
+        return "Patch Error: %s" % self.msg
+
+class PatchSet(object):
+    defaults = {
+        "strippath": 1
+    }
+
+    def __init__(self, dir, d):
+        self.dir = dir
+        self.d = d
+        self.patches = []
+        self._current = None
+
+    def current(self):
+        return self._current
+
+    def Clean(self):
+        """
+        Clean out the patch set.  Generally includes unapplying all
+        patches and wiping out all associated metadata.
+        """
+        raise NotImplementedError()
+
+    def Import(self, patch, force):
+        if not patch.get("file"):
+            if not patch.get("remote"):
+                raise PatchError("Patch file must be specified in patch import.")
+            else:
+                patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
+
+        for param in PatchSet.defaults:
+            if not patch.get(param):
+                patch[param] = PatchSet.defaults[param]
+
+        if patch.get("remote"):
+            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)
+
+        patch["filemd5"] = bb.utils.md5_file(patch["file"])
+
+    def Push(self, force):
+        raise NotImplementedError()
+
+    def Pop(self, force):
+        raise NotImplementedError()
+
+    def Refresh(self, remote = None, all = None):
+        raise NotImplementedError()
+
+    @staticmethod
+    def getPatchedFiles(patchfile, striplevel, srcdir=None):
+        """
+        Read a patch file and determine which files it will modify.
+        Params:
+            patchfile: the patch file to read
+            striplevel: the strip level at which the patch is going to be applied
+            srcdir: optional path to join onto the patched file paths
+        Returns:
+            A list of tuples of file path and change mode ('A' for add,
+            'D' for delete or 'M' for modify)
+        """
+
+        def patchedpath(patchline):
+            filepth = patchline.split()[1]
+            if filepth.endswith('/dev/null'):
+                return '/dev/null'
+            filesplit = filepth.split(os.sep)
+            if striplevel > len(filesplit):
+                bb.error('Patch %s has invalid strip level %d' % (patchfile, striplevel))
+                return None
+            return os.sep.join(filesplit[striplevel:])
+
+        copiedmode = False
+        filelist = []
+        with open(patchfile) as f:
+            for line in f:
+                if line.startswith('--- '):
+                    patchpth = patchedpath(line)
+                    if not patchpth:
+                        break
+                    if copiedmode:
+                        addedfile = patchpth
+                    else:
+                        removedfile = patchpth
+                elif line.startswith('+++ '):
+                    addedfile = patchedpath(line)
+                    if not addedfile:
+                        break
+                elif line.startswith('*** '):
+                    copiedmode = True
+                    removedfile = patchedpath(line)
+                    if not removedfile:
+                        break
+                else:
+                    removedfile = None
+                    addedfile = None
+
+                if addedfile and removedfile:
+                    if removedfile == '/dev/null':
+                        mode = 'A'
+                    elif addedfile == '/dev/null':
+                        mode = 'D'
+                    else:
+                        mode = 'M'
+                    if srcdir:
+                        fullpath = os.path.abspath(os.path.join(srcdir, addedfile))
+                    else:
+                        fullpath = addedfile
+                    filelist.append((fullpath, mode))
+
+        return filelist
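+        # Example (hypothetical patch): with striplevel=1, a header pair
+        #   --- a/src/foo.c
+        #   +++ b/src/foo.c
+        # yields [('src/foo.c', 'M')]; a '--- /dev/null' line would instead
+        # mark the file as added ('A').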
+
+
+class PatchTree(PatchSet):
+    def __init__(self, dir, d):
+        PatchSet.__init__(self, dir, d)
+        self.patchdir = os.path.join(self.dir, 'patches')
+        self.seriespath = os.path.join(self.dir, 'patches', 'series')
+        bb.utils.mkdirhier(self.patchdir)
+
+    def _appendPatchFile(self, patch, strippath):
+        with open(self.seriespath, 'a') as f:
+            f.write(os.path.basename(patch) + "," + str(strippath) + "\n")
+        shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
+        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+    def _removePatch(self, p):
+        patch = {}
+        patch['file'] = p.split(",")[0]
+        patch['strippath'] = p.split(",")[1]
+        self._applypatch(patch, False, True)
+
+    def _removePatchFile(self, all = False):
+        if not os.path.exists(self.seriespath):
+            return
+        with open(self.seriespath, 'r+') as f:
+            patches = f.readlines()
+        if all:
+            for p in reversed(patches):
+                self._removePatch(os.path.join(self.patchdir, p.strip()))
+            patches = []
+        else:
+            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
+            patches.pop()
+        with open(self.seriespath, 'w') as f:
+            for p in patches:
+                f.write(p)
+
+    def Import(self, patch, force = None):
+        """Import a patch, inserting it after the current patch."""
+        PatchSet.Import(self, patch, force)
+
+        if self._current is not None:
+            i = self._current + 1
+        else:
+            i = 0
+        self.patches.insert(i, patch)
+
+    def _applypatch(self, patch, force = False, reverse = False, run = True):
+        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
+        if reverse:
+            shellcmd.append('-R')
+
+        if not run:
+            return "sh" + "-c" + " ".join(shellcmd)
+
+        if not force:
+            shellcmd.append('--dry-run')
+
+        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+        if force:
+            return
+
+        shellcmd.pop()
+        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+        if not reverse:
+            self._appendPatchFile(patch['file'], patch['strippath'])
+
+        return output
+
+    def Push(self, force = False, all = False, run = True):
+        bb.note("self._current is %s" % self._current)
+        bb.note("patches is %s" % self.patches)
+        if all:
+            for i in self.patches:
+                bb.note("applying patch %s" % i)
+                self._applypatch(i, force)
+                self._current = i
+        else:
+            if self._current is not None:
+                next = self._current + 1
+            else:
+                next = 0
+
+            bb.note("applying patch %s" % self.patches[next])
+            ret = self._applypatch(self.patches[next], force)
+
+            self._current = next
+            return ret
+
+    def Pop(self, force = None, all = None):
+        if all:
+            self._removePatchFile(True)
+            self._current = None
+        else:
+            self._removePatchFile(False)
+
+        if self._current == 0:
+            self._current = None
+
+        if self._current is not None:
+            self._current = self._current - 1
+
+    def Clean(self):
+        """Unapply all patches and clear the series file."""
+        self.Pop(all=True)
+
+class GitApplyTree(PatchTree):
+    patch_line_prefix = '%% original patch'
+
+    def __init__(self, dir, d):
+        PatchTree.__init__(self, dir, d)
+
+    @staticmethod
+    def extractPatchHeader(patchfile):
+        """
+        Extract just the header lines from the top of a patch file
+        """
+        lines = []
+        with open(patchfile, 'r') as f:
+            for line in f.readlines():
+                if line.startswith('Index: ') or line.startswith('diff -') or line.startswith('---'):
+                    break
+                lines.append(line)
+        return lines
+
+    @staticmethod
+    def prepareCommit(patchfile):
+        """
+        Prepare a git commit command line based on the header from a patch file
+        (typically this is useful for patches that cannot be applied with "git am" due to formatting)
+        """
+        import tempfile
+        import re
+        author_re = re.compile(r'[\S ]+ <\S+@\S+\.\S+>')
+        # Process patch header and extract useful information
+        lines = GitApplyTree.extractPatchHeader(patchfile)
+        outlines = []
+        author = None
+        date = None
+        for line in lines:
+            if line.startswith('Subject: '):
+                subject = line.split(':', 1)[1]
+                # Remove any [PATCH][oe-core] etc.
+                subject = re.sub(r'\[.+?\]\s*', '', subject)
+                outlines.insert(0, '%s\n\n' % subject.strip())
+                continue
+            if line.startswith('From: ') or line.startswith('Author: '):
+                authorval = line.split(':', 1)[1].strip().replace('"', '')
+                # git is fussy about author formatting i.e. it must be Name <email@domain>
+                if author_re.match(authorval):
+                    author = authorval
+                    continue
+            if line.startswith('Date: '):
+                if date is None:
+                    dateval = line.split(':', 1)[1].strip()
+                    # Very crude check for date format, since git will blow up if it's not in the right
+                    # format. Without e.g. a python-dateutils dependency we can't do a whole lot more
+                    if len(dateval) > 12:
+                        date = dateval
+                continue
+            if line.startswith('Signed-off-by: '):
+                authorval = line.split(':', 1)[1].strip().replace('"', '')
+                # git is fussy about author formatting i.e. it must be Name <email@domain>
+                if author_re.match(authorval):
+                    author = authorval
+            outlines.append(line)
+        # Write out commit message to a file
+        with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+            tmpfile = tf.name
+            for line in outlines:
+                tf.write(line)
+        # Prepare git command
+        cmd = ["git", "commit", "-F", tmpfile]
+        # git doesn't like plain email addresses as authors
+        if author and '<' in author:
+            cmd.append('--author="%s"' % author)
+        if date:
+            cmd.append('--date="%s"' % date)
+        return (tmpfile, cmd)
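+        # Sketch (hypothetical header): a patch beginning with
+        #   From: Jane Doe <jane@example.com>
+        #   Subject: [PATCH] fix overflow
+        # produces cmd equivalent to:
+        #   git commit -F <tmpfile> --author="Jane Doe <jane@example.com>"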
+
+    @staticmethod
+    def extractPatches(tree, startcommit, outdir):
+        import tempfile
+        import shutil
+        tempdir = tempfile.mkdtemp(prefix='oepatch')
+        try:
+            shellcmd = ["git", "format-patch", startcommit, "-o", tempdir]
+            out = runcmd(["sh", "-c", " ".join(shellcmd)], tree)
+            if out:
+                for srcfile in out.split():
+                    patchlines = []
+                    outfile = None
+                    with open(srcfile, 'r') as f:
+                        for line in f:
+                            if line.startswith(GitApplyTree.patch_line_prefix):
+                                outfile = line.split()[-1].strip()
+                                continue
+                            patchlines.append(line)
+                    if not outfile:
+                        outfile = os.path.basename(srcfile)
+                    with open(os.path.join(outdir, outfile), 'w') as of:
+                        for line in patchlines:
+                            of.write(line)
+        finally:
+            shutil.rmtree(tempdir)
+
+    def _applypatch(self, patch, force = False, reverse = False, run = True):
+        import shutil
+
+        def _applypatchhelper(shellcmd, patch, force = False, reverse = False, run = True):
+            if reverse:
+                shellcmd.append('-R')
+
+            shellcmd.append(patch['file'])
+
+            if not run:
+                return "sh" + "-c" + " ".join(shellcmd)
+
+            return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+
+        # Add hooks which add a pointer to the original patch file name in the commit message
+        reporoot = (runcmd("git rev-parse --show-toplevel".split(), self.dir) or '').strip()
+        if not reporoot:
+            raise Exception("Cannot get repository root for directory %s" % self.dir)
+        commithook = os.path.join(reporoot, '.git', 'hooks', 'commit-msg')
+        commithook_backup = commithook + '.devtool-orig'
+        applyhook = os.path.join(reporoot, '.git', 'hooks', 'applypatch-msg')
+        applyhook_backup = applyhook + '.devtool-orig'
+        if os.path.exists(commithook):
+            shutil.move(commithook, commithook_backup)
+        if os.path.exists(applyhook):
+            shutil.move(applyhook, applyhook_backup)
+        with open(commithook, 'w') as f:
+            # NOTE: the formatting here is significant; if you change it you'll also need to
+            # change other places which read it back
+            f.write('echo >> $1\n')
+            f.write('echo "%s: $PATCHFILE" >> $1\n' % GitApplyTree.patch_line_prefix)
+        os.chmod(commithook, 0755)
+        shutil.copy2(commithook, applyhook)
+        try:
+            patchfilevar = 'PATCHFILE="%s"' % os.path.basename(patch['file'])
+            try:
+                shellcmd = [patchfilevar, "git", "--work-tree=%s" % reporoot, "am", "-3", "--keep-cr", "-p%s" % patch['strippath']]
+                return _applypatchhelper(shellcmd, patch, force, reverse, run)
+            except CmdError:
+                # Need to abort the git am, or we'll still be within it at the end
+                try:
+                    shellcmd = ["git", "--work-tree=%s" % reporoot, "am", "--abort"]
+                    runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+                except CmdError:
+                    pass
+                # Fall back to git apply
+                shellcmd = ["git", "--git-dir=%s" % reporoot, "apply", "-p%s" % patch['strippath']]
+                try:
+                    output = _applypatchhelper(shellcmd, patch, force, reverse, run)
+                except CmdError:
+                    # Fall back to patch
+                    output = PatchTree._applypatch(self, patch, force, reverse, run)
+                # Add all files
+                shellcmd = ["git", "add", "-f", "-A", "."]
+                output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+                # Exclude the patches directory
+                shellcmd = ["git", "reset", "HEAD", self.patchdir]
+                output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+                # Commit the result
+                (tmpfile, shellcmd) = self.prepareCommit(patch['file'])
+                try:
+                    shellcmd.insert(0, patchfilevar)
+                    output += runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
+                finally:
+                    os.remove(tmpfile)
+                return output
+        finally:
+            os.remove(commithook)
+            os.remove(applyhook)
+            if os.path.exists(commithook_backup):
+                shutil.move(commithook_backup, commithook)
+            if os.path.exists(applyhook_backup):
+                shutil.move(applyhook_backup, applyhook)
+
+
+class QuiltTree(PatchSet):
+    def _runcmd(self, args, run = True):
+        quiltrc = self.d.getVar('QUILTRCFILE', True)
+        if not run:
+            return ["quilt", "--quiltrc", quiltrc] + args
+        runcmd(["quilt", "--quiltrc", quiltrc] + args, self.dir)
+
+    def _quiltpatchpath(self, file):
+        return os.path.join(self.dir, "patches", os.path.basename(file))
+
+
+    def __init__(self, dir, d):
+        PatchSet.__init__(self, dir, d)
+        self.initialized = False
+        p = os.path.join(self.dir, 'patches')
+        if not os.path.exists(p):
+            os.makedirs(p)
+
+    def Clean(self):
+        try:
+            self._runcmd(["pop", "-a", "-f"])
+            oe.path.remove(os.path.join(self.dir, "patches","series"))
+        except Exception:
+            pass
+        self.initialized = True
+
+    def InitFromDir(self):
+        # read series -> self.patches
+        seriespath = os.path.join(self.dir, 'patches', 'series')
+        if not os.path.exists(self.dir):
+            raise NotFoundError(self.dir)
+        if os.path.exists(seriespath):
+            with open(seriespath, 'r') as f:
+                for line in f.readlines():
+                    patch = {}
+                    parts = line.strip().split()
+                    patch["quiltfile"] = self._quiltpatchpath(parts[0])
+                    patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
+                    if len(parts) > 1:
+                        patch["strippath"] = parts[1][2:]
+                    self.patches.append(patch)
+
+            # determine which patches are applied -> self._current
+            try:
+                output = runcmd(["quilt", "applied"], self.dir)
+            except CmdError as e:
+                if e.output.strip() == "No patches applied":
+                    return
+                else:
+                    raise
+            output = [val for val in output.split('\n') if not val.startswith('#')]
+            for patch in self.patches:
+                if os.path.basename(patch["quiltfile"]) == output[-1]:
+                    self._current = self.patches.index(patch)
+        self.initialized = True
+
+    def Import(self, patch, force = None):
+        if not self.initialized:
+            self.InitFromDir()
+        PatchSet.Import(self, patch, force)
+        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
+        with open(os.path.join(self.dir, "patches", "series"), "a") as f:
+            f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"] + "\n")
+        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
+        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
+
+        # TODO: determine if the file being imported:
+        #      1) is already imported, and is the same
+        #      2) is already imported, but differs
+
+        self.patches.insert(self._current or 0, patch)
+
+
+    def Push(self, force = False, all = False, run = True):
+        # quilt push [-f]
+
+        args = ["push"]
+        if force:
+            args.append("-f")
+        if all:
+            args.append("-a")
+        if not run:
+            return self._runcmd(args, run)
+
+        self._runcmd(args)
+
+        if self._current is not None:
+            self._current = self._current + 1
+        else:
+            self._current = 0
+
+    def Pop(self, force = None, all = None):
+        # quilt pop [-f]
+        args = ["pop"]
+        if force:
+            args.append("-f")
+        if all:
+            args.append("-a")
+
+        self._runcmd(args)
+
+        if self._current == 0:
+            self._current = None
+
+        if self._current is not None:
+            self._current = self._current - 1
+
+    def Refresh(self, **kwargs):
+        if kwargs.get("remote"):
+            patch = self.patches[kwargs["patch"]]
+            if not patch:
+                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
+            (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(patch["remote"])
+            if type == "file":
+                import shutil
+                if not patch.get("file") and patch.get("remote"):
+                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)
+
+                shutil.copyfile(patch["quiltfile"], patch["file"])
+            else:
+                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
+        else:
+            # quilt refresh
+            args = ["refresh"]
+            if kwargs.get("quiltfile"):
+                args.append(os.path.basename(kwargs["quiltfile"]))
+            elif kwargs.get("patch"):
+                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
+            self._runcmd(args)
+
+class Resolver(object):
+    def __init__(self, patchset, terminal):
+        raise NotImplementedError()
+
+    def Resolve(self):
+        raise NotImplementedError()
+
+    def Revert(self):
+        raise NotImplementedError()
+
+    def Finalize(self):
+        raise NotImplementedError()
+
+class NOOPResolver(Resolver):
+    def __init__(self, patchset, terminal):
+        self.patchset = patchset
+        self.terminal = terminal
+
+    def Resolve(self):
+        olddir = os.path.abspath(os.curdir)
+        os.chdir(self.patchset.dir)
+        try:
+            self.patchset.Push()
+        except Exception:
+            os.chdir(olddir)
+            raise
+
+# Patch resolver which relies on the user doing all the work involved in the
+# resolution, with the exception of refreshing the remote copy of the patch
+# files (the urls).
+class UserResolver(Resolver):
+    def __init__(self, patchset, terminal):
+        self.patchset = patchset
+        self.terminal = terminal
+
+    # Force a push in the patchset, then drop to a shell for the user to
+    # resolve any rejected hunks
+    def Resolve(self):
+        olddir = os.path.abspath(os.curdir)
+        os.chdir(self.patchset.dir)
+        try:
+            self.patchset.Push(False)
+        except CmdError:
+            # Patch application failed
+            patchcmd = self.patchset.Push(True, False, False)
+
+            t = self.patchset.d.getVar('T', True)
+            if not t:
+                bb.msg.fatal("Build", "T not set")
+            bb.utils.mkdirhier(t)
+            import random
+            rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
+            with open(rcfile, "w") as f:
+                f.write("echo '*** Manual patch resolution mode ***'\n")
+                f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
+                f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
+                f.write("echo ''\n")
+                f.write(" ".join(patchcmd) + "\n")
+            os.chmod(rcfile, 0775)
+
+            self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
+
+            # Construct a new PatchSet after the user's changes, compare the
+            # sets, checking patches for modifications, and doing a remote
+            # refresh on each.
+            oldpatchset = self.patchset
+            self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
+
+            for patch in self.patchset.patches:
+                oldpatch = None
+                for opatch in oldpatchset.patches:
+                    if opatch["quiltfile"] == patch["quiltfile"]:
+                        oldpatch = opatch
+
+                if oldpatch:
+                    patch["remote"] = oldpatch["remote"]
+                    if patch["quiltfile"] == oldpatch["quiltfile"]:
+                        if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
+                            bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
+                            # user change?  remote refresh
+                            self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
+                        else:
+                            # User did not fix the problem.  Abort.
+                            raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
+        except Exception:
+            os.chdir(olddir)
+            raise
+        os.chdir(olddir)
diff --git a/meta/lib/oe/path.py b/meta/lib/oe/path.py
new file mode 100644
index 0000000..413ebfb
--- /dev/null
+++ b/meta/lib/oe/path.py
@@ -0,0 +1,243 @@
+import errno
+import glob
+import shutil
+import subprocess
+import os.path
+
+def join(*paths):
+    """Like os.path.join but doesn't treat absolute RHS specially"""
+    return os.path.normpath("/".join(paths))
+
+def relative(src, dest):
+    """ Return a relative path from src to dest.
+
+    >>> relative("/usr/bin", "/tmp/foo/bar")
+    ../../tmp/foo/bar
+
+    >>> relative("/usr/bin", "/usr/lib")
+    ../lib
+
+    >>> relative("/tmp", "/tmp/foo/bar")
+    foo/bar
+    """
+
+    return os.path.relpath(dest, src)
+
+def make_relative_symlink(path):
+    """ Convert an absolute symlink to a relative one """
+    if not os.path.islink(path):
+        return
+    link = os.readlink(path)
+    if not os.path.isabs(link):
+        return
+
+    # find the common ancestor directory
+    ancestor = path
+    depth = 0
+    while ancestor and not link.startswith(ancestor):
+        ancestor = ancestor.rpartition('/')[0]
+        depth += 1
+
+    if not ancestor:
+        print("make_relative_symlink() Error: unable to find the common ancestor of %s and its target" % path)
+        return
+
+    base = link.partition(ancestor)[2].strip('/')
+    while depth > 1:
+        base = "../" + base
+        depth -= 1
+
+    os.remove(path)
+    os.symlink(base, path)
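+# Example (hypothetical link): a symlink /usr/bin/foo -> /usr/lib/foo-impl
+# is rewritten as the relative link /usr/bin/foo -> ../lib/foo-impl.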
+
+def format_display(path, metadata):
+    """ Prepare a path for display to the user. """
+    rel = relative(metadata.getVar("TOPDIR", True), path)
+    if len(rel) > len(path):
+        return path
+    else:
+        return rel
+
+def copytree(src, dst):
+    # We could use something like shutil.copytree here, but it turns out
+    # to be slow: it takes twice as long copying to an empty directory,
+    # and if dst already has contents it can be 15 times slower.
+    # This way we also preserve hardlinks between files in the tree.
+
+    bb.utils.mkdirhier(dst)
+    cmd = 'tar -cf - -C %s -p . | tar -xf - -C %s' % (src, dst)
+    check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+
+def copyhardlinktree(src, dst):
+    """ Make the hard link when possible, otherwise copy. """
+    bb.utils.mkdirhier(dst)
+    if os.path.isdir(src) and not os.listdir(src):
+        return
+
+    if os.stat(src).st_dev == os.stat(dst).st_dev:
+        # Need to copy directories only with tar first since cp will error if
+        # two writers try to create a directory at the same time
+        cmd = 'cd %s; find . -type d -print | tar -cf - -C %s -p --files-from - --no-recursion | tar -xf - -C %s' % (src, src, dst)
+        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+        cmd = 'cd %s; find . -print0 | cpio --null -pdlu %s' % (src, dst)
+        check_output(cmd, shell=True, stderr=subprocess.STDOUT)
+    else:
+        copytree(src, dst)
+
+def remove(path, recurse=True):
+    """Equivalent to rm -f or rm -rf"""
+    for name in glob.glob(path):
+        try:
+            os.unlink(name)
+        except OSError as exc:
+            if recurse and exc.errno == errno.EISDIR:
+                shutil.rmtree(name)
+            elif exc.errno != errno.ENOENT:
+                raise
+
+def symlink(source, destination, force=False):
+    """Create a symbolic link"""
+    try:
+        if force:
+            remove(destination)
+        os.symlink(source, destination)
+    except OSError as e:
+        if e.errno != errno.EEXIST or os.readlink(destination) != source:
+            raise
+
+class CalledProcessError(Exception):
+    def __init__(self, retcode, cmd, output = None):
+        self.retcode = retcode
+        self.cmd = cmd
+        self.output = output
+    def __str__(self):
+        return "Command '%s' returned non-zero exit status %d with output %s" % (self.cmd, self.retcode, self.output)
+
+# Not needed when we move to python 2.7
+def check_output(*popenargs, **kwargs):
+    r"""Run command with arguments and return its output as a byte string.
+
+    If the exit code was non-zero it raises a CalledProcessError.  The
+    CalledProcessError object will have the return code in the returncode
+    attribute and output in the output attribute.
+
+    The arguments are the same as for the Popen constructor.  Example:
+
+    >>> check_output(["ls", "-l", "/dev/null"])
+    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'
+
+    The stdout argument is not allowed as it is used internally.
+    To capture standard error in the result, use stderr=STDOUT.
+
+    >>> check_output(["/bin/sh", "-c",
+    ...               "ls -l non_existent_file ; exit 0"],
+    ...              stderr=STDOUT)
+    'ls: non_existent_file: No such file or directory\n'
+    """
+    if 'stdout' in kwargs:
+        raise ValueError('stdout argument not allowed, it will be overridden.')
+    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
+    output, unused_err = process.communicate()
+    retcode = process.poll()
+    if retcode:
+        cmd = kwargs.get("args")
+        if cmd is None:
+            cmd = popenargs[0]
+        raise CalledProcessError(retcode, cmd, output=output)
+    return output
+
+def find(dir, **walkoptions):
+    """ Given a directory, recurses into that directory,
+    returning all files as absolute paths. """
+
+    for root, dirs, files in os.walk(dir, **walkoptions):
+        for file in files:
+            yield os.path.join(root, file)
+
+
+## realpath() related functions
+def __is_path_below(file, root):
+    return (file + os.path.sep).startswith(root)
+
+def __realpath_rel(start, rel_path, root, loop_cnt, assume_dir):
+    """Calculates real path of symlink 'start' + 'rel_path' below
+    'root'; no part of 'start' below 'root' must contain symlinks. """
+    have_dir = True
+
+    for d in rel_path.split(os.path.sep):
+        if not have_dir and not assume_dir:
+            raise OSError(errno.ENOENT, "no such directory %s" % start)
+
+        if d == os.path.pardir: # '..'
+            if len(start) >= len(root):
+                # do not follow '..' before root
+                start = os.path.dirname(start)
+            else:
+                # emit warning?
+                pass
+        else:
+            (start, have_dir) = __realpath(os.path.join(start, d),
+                                           root, loop_cnt, assume_dir)
+
+        assert(__is_path_below(start, root))
+
+    return start
+
+def __realpath(file, root, loop_cnt, assume_dir):
+    while os.path.islink(file) and len(file) >= len(root):
+        if loop_cnt == 0:
+            raise OSError(errno.ELOOP, file)
+
+        loop_cnt -= 1
+        target = os.path.normpath(os.readlink(file))
+
+        if not os.path.isabs(target):
+            tdir = os.path.dirname(file)
+            assert(__is_path_below(tdir, root))
+        else:
+            tdir = root
+
+        file = __realpath_rel(tdir, target, root, loop_cnt, assume_dir)
+
+    try:
+        is_dir = os.path.isdir(file)
+    except OSError:
+        is_dir = False
+
+    return (file, is_dir)
+
+def realpath(file, root, use_physdir = True, loop_cnt = 100, assume_dir = False):
+    """ Returns the canonical path of 'file' with assuming a
+    toplevel 'root' directory. When 'use_physdir' is set, all
+    preceding path components of 'file' will be resolved first;
+    this flag should be set unless it is guaranteed that there is
+    no symlink in the path. When 'assume_dir' is not set, missing
+    path components will raise an ENOENT error"""
+
+    root = os.path.normpath(root)
+    file = os.path.normpath(file)
+
+    if not root.endswith(os.path.sep):
+        # letting root end with '/' makes some things easier
+        root = root + os.path.sep
+
+    if not __is_path_below(file, root):
+        raise OSError(errno.EINVAL, "file '%s' is not below root" % file)
+
+    try:
+        if use_physdir:
+            file = __realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir)
+        else:
+            file = __realpath(file, root, loop_cnt, assume_dir)[0]
+    except OSError as e:
+        if e.errno == errno.ELOOP:
+            # make ELOOP more readable; without catching it, a backtrace
+            # with hundreds of OSError exceptions would be printed
+            raise OSError(errno.ELOOP,
+                          "too many recursions while resolving '%s'; loop in '%s'" %
+                          (file, e.strerror))
+
+        raise
+
+    return file
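+# Usage sketch (hypothetical rootfs): with root="/rootfs" and the link
+# /rootfs/usr/bin/sh -> /bin/busybox, realpath("/rootfs/usr/bin/sh", "/rootfs")
+# resolves the absolute target inside the root, returning
+# "/rootfs/bin/busybox".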
diff --git a/meta/lib/oe/prservice.py b/meta/lib/oe/prservice.py
new file mode 100644
index 0000000..b0cbcb1
--- /dev/null
+++ b/meta/lib/oe/prservice.py
@@ -0,0 +1,126 @@
+
+def prserv_make_conn(d, check = False):
+    import prserv.serv
+    host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
+    try:
+        conn = None
+        conn = prserv.serv.PRServerConnection(host_params[0], int(host_params[1]))
+        if check:
+            if not conn.ping():
+                raise Exception('service not available')
+        d.setVar("__PRSERV_CONN",conn)
+    except Exception as exc:
+        bb.fatal("Connecting to PR service %s:%s failed: %s" % (host_params[0], host_params[1], str(exc)))
+
+    return conn
+
+def prserv_dump_db(d):
+    if not d.getVar('PRSERV_HOST', True):
+        bb.error("Not using network based PR service")
+        return None
+
+    conn = d.getVar("__PRSERV_CONN", True)
+    if conn is None:
+        conn = prserv_make_conn(d)
+        if conn is None:
+            bb.error("Making connection failed to remote PR service")
+            return None
+
+    #dump db
+    opt_version = d.getVar('PRSERV_DUMPOPT_VERSION', True)
+    opt_pkgarch = d.getVar('PRSERV_DUMPOPT_PKGARCH', True)
+    opt_checksum = d.getVar('PRSERV_DUMPOPT_CHECKSUM', True)
+    opt_col = ("1" == d.getVar('PRSERV_DUMPOPT_COL', True))
+    return conn.export(opt_version, opt_pkgarch, opt_checksum, opt_col)
+
+def prserv_import_db(d, filter_version=None, filter_pkgarch=None, filter_checksum=None):
+    if not d.getVar('PRSERV_HOST', True):
+        bb.error("Not using network based PR service")
+        return None
+
+    conn = d.getVar("__PRSERV_CONN", True)
+    if conn is None:
+        conn = prserv_make_conn(d)
+        if conn is None:
+            bb.error("Making connection failed to remote PR service")
+            return None
+    #get the entry values
+    imported = []
+    prefix = "PRAUTO$"
+    for v in d.keys():
+        if v.startswith(prefix):
+            (remain, sep, checksum) = v.rpartition('$')
+            (remain, sep, pkgarch) = remain.rpartition('$')
+            (remain, sep, version) = remain.rpartition('$')
+            if (remain + '$' != prefix) or \
+               (filter_version and filter_version != version) or \
+               (filter_pkgarch and filter_pkgarch != pkgarch) or \
+               (filter_checksum and filter_checksum != checksum):
+                continue
+            try:
+                value = int(d.getVar(remain + '$' + version + '$' + pkgarch + '$' + checksum, True))
+            except BaseException as exc:
+                bb.debug("Not valid value of %s:%s" % (v,str(exc)))
+                continue
+            ret = conn.importone(version,pkgarch,checksum,value)
+            if ret != value:
+                bb.error("importing(%s,%s,%s,%d) failed. DB may have larger value %d" % (version,pkgarch,checksum,value,ret))
+            else:
+                imported.append((version, pkgarch, checksum, value))
+    return imported
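+# Key format sketch (hypothetical values): an entry such as
+#   PRAUTO$2.0$qemux86$deadbeef = "5"
+# is decoded via rpartition('$') into version="2.0", pkgarch="qemux86",
+# checksum="deadbeef" and value=5 before being sent to the server.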
+
+def prserv_export_tofile(d, metainfo, datainfo, lockdown, nomax=False):
+    import bb.utils
+    #initialize the output file
+    bb.utils.mkdirhier(d.getVar('PRSERV_DUMPDIR', True))
+    df = d.getVar('PRSERV_DUMPFILE', True)
+    #write data
+    lf = bb.utils.lockfile("%s.lock" % df)
+    f = open(df, "a")
+    if metainfo:
+        #dump column info
+        f.write("#PR_core_ver = \"%s\"\n\n" % metainfo['core_ver'])
+        f.write("#Table: %s\n" % metainfo['tbl_name'])
+        f.write("#Columns:\n")
+        f.write("#name      \t type    \t notn    \t dflt    \t pk\n")
+        f.write("#----------\t --------\t --------\t --------\t ----\n")
+        for i in range(len(metainfo['col_info'])):
+            f.write("#%10s\t %8s\t %8s\t %8s\t %4s\n" % 
+                    (metainfo['col_info'][i]['name'], 
+                     metainfo['col_info'][i]['type'], 
+                     metainfo['col_info'][i]['notnull'], 
+                     metainfo['col_info'][i]['dflt_value'], 
+                     metainfo['col_info'][i]['pk']))
+        f.write("\n")
+
+    if lockdown:
+        f.write("PRSERV_LOCKDOWN = \"1\"\n\n")
+
+    if datainfo:
+        idx = {}
+        for i in range(len(datainfo)):
+            pkgarch = datainfo[i]['pkgarch']
+            value = datainfo[i]['value']
+            if pkgarch not in idx:
+                idx[pkgarch] = i
+            elif value > datainfo[idx[pkgarch]]['value']:
+                idx[pkgarch] = i
+            f.write("PRAUTO$%s$%s$%s = \"%s\"\n" % 
+                (str(datainfo[i]['version']), pkgarch, str(datainfo[i]['checksum']), str(value)))
+        if not nomax:
+            for i in idx:
+                f.write("PRAUTO_%s_%s = \"%s\"\n" % (str(datainfo[idx[i]]['version']),str(datainfo[idx[i]]['pkgarch']),str(datainfo[idx[i]]['value'])))
+    f.close()
+    bb.utils.unlockfile(lf)
+
+def prserv_check_avail(d):
+    host_params = filter(None, (d.getVar("PRSERV_HOST", True) or '').split(':'))
+    try:
+        if len(host_params) != 2:
+            raise TypeError
+        else:
+            int(host_params[1])
+    except TypeError:
+        bb.fatal('Undefined/incorrect PRSERV_HOST value. Format: "host:port"')
+    else:
+        prserv_make_conn(d, True)
diff --git a/meta/lib/oe/qa.py b/meta/lib/oe/qa.py
new file mode 100644
index 0000000..d5cdaa0
--- /dev/null
+++ b/meta/lib/oe/qa.py
@@ -0,0 +1,111 @@
+class ELFFile:
+    EI_NIDENT = 16
+
+    EI_CLASS      = 4
+    EI_DATA       = 5
+    EI_VERSION    = 6
+    EI_OSABI      = 7
+    EI_ABIVERSION = 8
+
+    # possible values for EI_CLASS
+    ELFCLASSNONE = 0
+    ELFCLASS32   = 1
+    ELFCLASS64   = 2
+
+    # possible value for EI_VERSION
+    EV_CURRENT   = 1
+
+    # possible values for EI_DATA
+    ELFDATANONE  = 0
+    ELFDATA2LSB  = 1
+    ELFDATA2MSB  = 2
+
+    def my_assert(self, expectation, result):
+        if not expectation == result:
+            raise Exception("%s: expected %r but found %r" %
+                            (self.name, expectation, result))
+
+    def __init__(self, name, bits = 0):
+        self.name = name
+        self.bits = bits
+        self.objdump_output = {}
+
+    def open(self):
+        self.file = file(self.name, "r")
+        self.data = self.file.read(ELFFile.EI_NIDENT+4)
+
+        self.my_assert(len(self.data), ELFFile.EI_NIDENT+4)
+        self.my_assert(self.data[0], chr(0x7f) )
+        self.my_assert(self.data[1], 'E')
+        self.my_assert(self.data[2], 'L')
+        self.my_assert(self.data[3], 'F')
+        if self.bits == 0:
+            if self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS32):
+                self.bits = 32
+            elif self.data[ELFFile.EI_CLASS] == chr(ELFFile.ELFCLASS64):
+                self.bits = 64
+            else:
+                # Not 32-bit or 64-bit; raise an error
+                raise Exception("ELF but not 32 or 64 bit.")
+        elif self.bits == 32:
+            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS32))
+        elif self.bits == 64:
+            self.my_assert(self.data[ELFFile.EI_CLASS], chr(ELFFile.ELFCLASS64))
+        else:
+            raise Exception("Must specify unknown, 32 or 64 bit size.")
+        self.my_assert(self.data[ELFFile.EI_VERSION], chr(ELFFile.EV_CURRENT) )
+
+        self.sex = self.data[ELFFile.EI_DATA]
+        if self.sex == chr(ELFFile.ELFDATANONE):
+            raise Exception("self.sex == ELFDATANONE")
+        elif self.sex == chr(ELFFile.ELFDATA2LSB):
+            self.sex = "<"
+        elif self.sex == chr(ELFFile.ELFDATA2MSB):
+            self.sex = ">"
+        else:
+            raise Exception("Unknown self.sex")
+
+    def osAbi(self):
+        return ord(self.data[ELFFile.EI_OSABI])
+
+    def abiVersion(self):
+        return ord(self.data[ELFFile.EI_ABIVERSION])
+
+    def abiSize(self):
+        return self.bits
+
+    def isLittleEndian(self):
+        return self.sex == "<"
+
+    def isBigEndian(self):
+        return self.sex == ">"
+
+    def machine(self):
+        """
+        Return the ELF e_machine field, unpacked using the byte order
+        recorded in self.sex.
+        """
+        import struct
+        (a,) = struct.unpack(self.sex+"H", self.data[18:20])
+        return a
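+        # Example (common e_machine values): 3 is x86 (EM_386), 40 is ARM
+        # (EM_ARM) and 62 is x86-64 (EM_X86_64).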
+
+    def run_objdump(self, cmd, d):
+        import bb.process
+
+        if cmd in self.objdump_output:
+            return self.objdump_output[cmd]
+
+        objdump = d.getVar('OBJDUMP', True)
+
+        env = os.environ.copy()
+        env["LC_ALL"] = "C"
+        env["PATH"] = d.getVar('PATH', True)
+
+        try:
+            bb.note("%s %s %s" % (objdump, cmd, self.name))
+            self.objdump_output[cmd] = bb.process.run([objdump, cmd, self.name], env=env, shell=False)[0]
+            return self.objdump_output[cmd]
+        except Exception as e:
+            bb.note("%s %s %s failed: %s" % (objdump, cmd, self.name, e))
+            return ""
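+
+# A minimal usage sketch (illustrative only; the path is hypothetical):
+#
+#   elf = ELFFile("/tmp/example-binary")
+#   elf.open()              # raises if the file is not a valid 32/64-bit ELF
+#   elf.abiSize()           # -> 32 or 64
+#   elf.isLittleEndian()    # -> True for ELFDATA2LSB objects
+#   elf.machine()           # -> e_machine value, e.g. 40 (0x28) for ARM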
diff --git a/meta/lib/oe/recipeutils.py b/meta/lib/oe/recipeutils.py
new file mode 100644
index 0000000..d4fa726
--- /dev/null
+++ b/meta/lib/oe/recipeutils.py
@@ -0,0 +1,740 @@
+# Utility functions for reading and modifying recipes
+#
+# Some code borrowed from the OE layer index
+#
+# Copyright (C) 2013-2015 Intel Corporation
+#
+
+import sys
+import os
+import os.path
+import tempfile
+import textwrap
+import difflib
+import utils
+import shutil
+import re
+import fnmatch
+from collections import OrderedDict, defaultdict
+
+
+# Help us to find places to insert values
+recipe_progression = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION', 'LICENSE', 'LIC_FILES_CHKSUM', 'PROVIDES', 'DEPENDS', 'PR', 'PV', 'SRCREV', 'SRC_URI', 'S', 'do_fetch', 'do_unpack', 'do_patch', 'EXTRA_OECONF', 'do_configure', 'EXTRA_OEMAKE', 'do_compile', 'do_install', 'do_populate_sysroot', 'INITSCRIPT', 'USERADD', 'GROUPADD', 'PACKAGES', 'FILES', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'RPROVIDES', 'RREPLACES', 'RCONFLICTS', 'ALLOW_EMPTY', 'do_package', 'do_deploy']
+# Variables that sometimes are a bit long but shouldn't be wrapped
+nowrap_vars = ['SUMMARY', 'HOMEPAGE', 'BUGTRACKER']
+list_vars = ['SRC_URI', 'LIC_FILES_CHKSUM']
+meta_vars = ['SUMMARY', 'DESCRIPTION', 'HOMEPAGE', 'BUGTRACKER', 'SECTION']
+
+
+def pn_to_recipe(cooker, pn):
+    """Convert a recipe name (PN) to the path to the recipe file"""
+    import bb.providers
+
+    if pn in cooker.recipecache.pkg_pn:
+        best = bb.providers.findBestProvider(pn, cooker.data, cooker.recipecache, cooker.recipecache.pkg_pn)
+        return best[3]
+    else:
+        return None
+
+
+def get_unavailable_reasons(cooker, pn):
+    """If a recipe could not be found, find out why if possible"""
+    import bb.taskdata
+    taskdata = bb.taskdata.TaskData(None, skiplist=cooker.skiplist)
+    return taskdata.get_reasons(pn)
+
+
+def parse_recipe(fn, appendfiles, d):
+    """
+    Parse an individual recipe file, optionally with a list of
+    bbappend files.
+    """
+    import bb.cache
+    envdata = bb.cache.Cache.loadDataFull(fn, appendfiles, d)
+    return envdata
+
+
+def parse_recipe_simple(cooker, pn, d, appends=True):
+    """
+    Parse a recipe and optionally all bbappends that apply to it
+    in the current configuration.
+    """
+    import bb.providers
+
+    recipefile = pn_to_recipe(cooker, pn)
+    if not recipefile:
+        skipreasons = get_unavailable_reasons(cooker, pn)
+        # We may as well re-use bb.providers.NoProvider here
+        if skipreasons:
+            raise bb.providers.NoProvider(skipreasons)
+        else:
+            raise bb.providers.NoProvider('Unable to find any recipe file matching %s' % pn)
+    if appends:
+        appendfiles = cooker.collection.get_file_appends(recipefile)
+    else:
+        # appendfiles would otherwise be undefined when appends is False
+        appendfiles = []
+    return parse_recipe(recipefile, appendfiles, d)
+
+
+def get_var_files(fn, varlist, d):
+    """Find the file in which each of a list of variables is set.
+    Note: requires variable history to be enabled when parsing.
+    """
+    varfiles = {}
+    for v in varlist:
+        history = d.varhistory.variable(v)
+        files = []
+        for event in history:
+            if 'file' in event and not 'flag' in event:
+                files.append(event['file'])
+        if files:
+            actualfile = files[-1]
+        else:
+            actualfile = None
+        varfiles[v] = actualfile
+
+    return varfiles
+
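+# Illustrative sketch of the result shape (paths are hypothetical):
+#
+#   get_var_files('/path/to/foo_1.0.bb', ['SRC_URI', 'PV'], d)
+#   -> {'SRC_URI': '/path/to/foo.inc', 'PV': '/path/to/foo_1.0.bb'}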
+
+def patch_recipe_file(fn, values, patch=False, relpath=''):
+    """Update or insert variable values into a recipe file (assuming you
+       have already identified the exact file you want to update.)
+       Note that some manual inspection/intervention may be required
+       since this cannot handle all situations.
+    """
+    remainingnames = {}
+    for k in values.keys():
+        remainingnames[k] = recipe_progression.index(k) if k in recipe_progression else -1
+    remainingnames = OrderedDict(sorted(remainingnames.iteritems(), key=lambda x: x[1]))
+
+    with tempfile.NamedTemporaryFile('w', delete=False) as tf:
+        def outputvalue(name):
+            rawtext = '%s = "%s"\n' % (name, values[name])
+            if name in nowrap_vars:
+                tf.write(rawtext)
+            elif name in list_vars:
+                splitvalue = values[name].split()
+                if len(splitvalue) > 1:
+                    linesplit = ' \\\n' + (' ' * (len(name) + 4))
+                    tf.write('%s = "%s%s"\n' % (name, linesplit.join(splitvalue), linesplit))
+                else:
+                    tf.write(rawtext)
+            else:
+                wrapped = textwrap.wrap(rawtext)
+                for wrapline in wrapped[:-1]:
+                    tf.write('%s \\\n' % wrapline)
+                tf.write('%s\n' % wrapped[-1])
+
+        tfn = tf.name
+        with open(fn, 'r') as f:
+            # First runthrough - find existing names (so we know not to insert based on recipe_progression)
+            # Second runthrough - make the changes
+            existingnames = []
+            for runthrough in [1, 2]:
+                currname = None
+                for line in f:
+                    if not currname:
+                        insert = False
+                        for k in remainingnames.keys():
+                            for p in recipe_progression:
+                                if re.match('^%s(_prepend|_append)*[ ?:=(]' % p, line):
+                                    if remainingnames[k] > -1 and recipe_progression.index(p) > remainingnames[k] and runthrough > 1 and not k in existingnames:
+                                        outputvalue(k)
+                                        del remainingnames[k]
+                                    break
+                        for k in remainingnames.keys():
+                            if re.match('^%s[ ?:=]' % k, line):
+                                currname = k
+                                if runthrough == 1:
+                                    existingnames.append(k)
+                                else:
+                                    del remainingnames[k]
+                                break
+                        if currname and runthrough > 1:
+                            outputvalue(currname)
+
+                    if currname:
+                        sline = line.rstrip()
+                        if not sline.endswith('\\'):
+                            currname = None
+                        continue
+                    if runthrough > 1:
+                        tf.write(line)
+                f.seek(0)
+        if remainingnames:
+            tf.write('\n')
+            for k in remainingnames.keys():
+                outputvalue(k)
+
+    with open(tfn, 'U') as f:
+        tolines = f.readlines()
+    if patch:
+        with open(fn, 'U') as f:
+            fromlines = f.readlines()
+        relfn = os.path.relpath(fn, relpath)
+        diff = difflib.unified_diff(fromlines, tolines, 'a/%s' % relfn, 'b/%s' % relfn)
+        os.remove(tfn)
+        return diff
+    else:
+        with open(fn, 'w') as f:
+            f.writelines(tolines)
+        os.remove(tfn)
+        return None
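+
+# Illustrative usage sketch (hypothetical paths and values): with patch=True
+# the function returns a unified diff instead of modifying the file in place:
+#
+#   diff = patch_recipe_file('/path/to/foo_1.0.bb', {'PV': '1.1'},
+#                            patch=True, relpath='/path/to')
+#   print ''.join(diff)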
+
+def localise_file_vars(fn, varfiles, varlist):
+    """Given a list of variables and their variable history (as fetched with
+    get_var_files()), find the file in which each variable should be set or
+    changed. This handles, for example, a recipe that includes an inc file
+    where variables may be changed - in most cases we want to update the inc
+    file when changing the variable value rather than adding it to the recipe itself.
+    """
+    fndir = os.path.dirname(fn) + os.sep
+
+    first_meta_file = None
+    for v in meta_vars:
+        f = varfiles.get(v, None)
+        if f:
+            actualdir = os.path.dirname(f) + os.sep
+            if actualdir.startswith(fndir):
+                first_meta_file = f
+                break
+
+    filevars = defaultdict(list)
+    for v in varlist:
+        f = varfiles[v]
+        # Only return files that are in the same directory as the recipe or in some directory below there
+        # (this excludes bbclass files and common inc files that wouldn't be appropriate to set the variable
+        # in if we were going to set a value specific to this recipe)
+        if f:
+            actualfile = f
+        else:
+            # Variable isn't in a file, if it's one of the "meta" vars, use the first file with a meta var in it
+            if first_meta_file:
+                actualfile = first_meta_file
+            else:
+                actualfile = fn
+
+        actualdir = os.path.dirname(actualfile) + os.sep
+        if not actualdir.startswith(fndir):
+            actualfile = fn
+        filevars[actualfile].append(v)
+
+    return filevars
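+
+# Illustrative sketch of the mapping returned (hypothetical paths):
+#
+#   localise_file_vars('/layer/recipes-foo/foo/foo_1.0.bb', varfiles, ['PV'])
+#   -> {'/layer/recipes-foo/foo/foo.inc': ['PV']}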
+
+def patch_recipe(d, fn, varvalues, patch=False, relpath=''):
+    """Modify a list of variable values in the specified recipe. Handles inc files if
+    used by the recipe.
+    """
+    varlist = varvalues.keys()
+    varfiles = get_var_files(fn, varlist, d)
+    locs = localise_file_vars(fn, varfiles, varlist)
+    patches = []
+    for f,v in locs.iteritems():
+        vals = {k: varvalues[k] for k in v}
+        patchdata = patch_recipe_file(f, vals, patch, relpath)
+        if patch:
+            patches.append(patchdata)
+
+    if patch:
+        return patches
+    else:
+        return None
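+
+# Illustrative usage sketch (hypothetical values; d and fn come from the
+# caller's cooker/datastore):
+#
+#   patch_recipe(d, fn, {'PR': 'r1', 'DESCRIPTION': 'Updated description'})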
+
+
+
+def copy_recipe_files(d, tgt_dir, whole_dir=False, download=True):
+    """Copy (local) recipe files, including both files included via include/require,
+    and files referred to in the SRC_URI variable."""
+    import bb.fetch2
+    import oe.path
+
+    # FIXME need a warning if the unexpanded SRC_URI value contains variable references
+
+    uris = (d.getVar('SRC_URI', True) or "").split()
+    fetch = bb.fetch2.Fetch(uris, d)
+    if download:
+        fetch.download()
+
+    # Copy local files to target directory and gather any remote files
+    bb_dir = os.path.dirname(d.getVar('FILE', True)) + os.sep
+    remotes = []
+    includes = [path for path in d.getVar('BBINCLUDED', True).split() if
+                path.startswith(bb_dir) and os.path.exists(path)]
+    for path in fetch.localpaths() + includes:
+        # Only import files that are under the meta directory
+        if path.startswith(bb_dir):
+            if not whole_dir:
+                relpath = os.path.relpath(path, bb_dir)
+                subdir = os.path.join(tgt_dir, os.path.dirname(relpath))
+                if not os.path.exists(subdir):
+                    os.makedirs(subdir)
+                shutil.copy2(path, os.path.join(tgt_dir, relpath))
+        else:
+            remotes.append(path)
+    # Simply copy whole meta dir, if requested
+    if whole_dir:
+        shutil.copytree(bb_dir, tgt_dir)
+
+    return remotes
+
+
+def get_recipe_patches(d):
+    """Get a list of the patches included in SRC_URI within a recipe."""
+    patchfiles = []
+    # Execute src_patches() defined in patch.bbclass - this works since that class
+    # is inherited globally
+    patches = bb.utils.exec_flat_python_func('src_patches', d)
+    for patch in patches:
+        _, _, local, _, _, parm = bb.fetch.decodeurl(patch)
+        patchfiles.append(local)
+    return patchfiles
+
+
+def get_recipe_patched_files(d):
+    """
+    Get the list of patches for a recipe along with the files each patch modifies.
+    Params:
+        d: the datastore for the recipe
+    Returns:
+        a dict mapping patch file path to a list of tuples of changed files and
+        change mode ('A' for add, 'D' for delete or 'M' for modify)
+    """
+    import oe.patch
+    # Execute src_patches() defined in patch.bbclass - this works since that class
+    # is inherited globally
+    patches = bb.utils.exec_flat_python_func('src_patches', d)
+    patchedfiles = {}
+    for patch in patches:
+        _, _, patchfile, _, _, parm = bb.fetch.decodeurl(patch)
+        striplevel = int(parm['striplevel'])
+        patchedfiles[patchfile] = oe.patch.PatchSet.getPatchedFiles(patchfile, striplevel, os.path.join(d.getVar('S', True), parm.get('patchdir', '')))
+    return patchedfiles
+
+
+def validate_pn(pn):
+    """Perform validation on a recipe name (PN) for a new recipe."""
+    reserved_names = ['forcevariable', 'append', 'prepend', 'remove']
+    if not re.match('^[0-9a-z-.]+$', pn):
+        return 'Recipe name "%s" is invalid: only characters 0-9, a-z, - and . are allowed' % pn
+    elif pn in reserved_names:
+        return 'Recipe name "%s" is invalid: is a reserved keyword' % pn
+    elif pn.startswith('pn-'):
+        return 'Recipe name "%s" is invalid: names starting with "pn-" are reserved' % pn
+    return ''
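+
+# Illustrative checks (the return value is an error string, or '' if valid):
+#
+#   validate_pn('libfoo')   -> ''
+#   validate_pn('foo@bar')  -> 'Recipe name "foo@bar" is invalid: ...'
+#   validate_pn('pn-foo')   -> 'Recipe name "pn-foo" is invalid: ...'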
+
+
+def get_bbappend_path(d, destlayerdir, wildcardver=False):
+    """Determine how a bbappend for a recipe should be named and located within another layer"""
+
+    import bb.cookerdata
+
+    destlayerdir = os.path.abspath(destlayerdir)
+    recipefile = d.getVar('FILE', True)
+    recipefn = os.path.splitext(os.path.basename(recipefile))[0]
+    if wildcardver and '_' in recipefn:
+        recipefn = recipefn.split('_', 1)[0] + '_%'
+    appendfn = recipefn + '.bbappend'
+
+    # Parse the specified layer's layer.conf file directly, in case the layer isn't in bblayers.conf
+    confdata = d.createCopy()
+    confdata.setVar('BBFILES', '')
+    confdata.setVar('LAYERDIR', destlayerdir)
+    destlayerconf = os.path.join(destlayerdir, "conf", "layer.conf")
+    confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
+
+    origlayerdir = find_layerdir(recipefile)
+    if not origlayerdir:
+        return (None, False)
+    # Now join this to the path where the bbappend is going and check if it is covered by BBFILES
+    appendpath = os.path.join(destlayerdir, os.path.relpath(os.path.dirname(recipefile), origlayerdir), appendfn)
+    closepath = ''
+    pathok = True
+    for bbfilespec in confdata.getVar('BBFILES', True).split():
+        if fnmatch.fnmatchcase(appendpath, bbfilespec):
+            # Our append path works, we're done
+            break
+        elif bbfilespec.startswith(destlayerdir) and fnmatch.fnmatchcase('test.bbappend', os.path.basename(bbfilespec)):
+            # Try to find the longest matching path
+            if len(bbfilespec) > len(closepath):
+                closepath = bbfilespec
+    else:
+        # Unfortunately the bbappend layer and the original recipe's layer don't have the same structure
+        if closepath:
+            # bbappend layer's layer.conf at least has a spec that picks up .bbappend files
+            # Now we just need to substitute out any wildcards
+            appendsubdir = os.path.relpath(os.path.dirname(closepath), destlayerdir)
+            if 'recipes-*' in appendsubdir:
+                # Try to copy this part from the original recipe path
+                res = re.search('/recipes-[^/]+/', recipefile)
+                if res:
+                    appendsubdir = appendsubdir.replace('/recipes-*/', res.group(0))
+            # This is crude, but we have to do something
+            appendsubdir = appendsubdir.replace('*', recipefn.split('_')[0])
+            appendsubdir = appendsubdir.replace('?', 'a')
+            appendpath = os.path.join(destlayerdir, appendsubdir, appendfn)
+        else:
+            pathok = False
+    return (appendpath, pathok)
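+
+# Illustrative sketch (hypothetical layer layout):
+#
+#   get_bbappend_path(d, '/home/user/meta-custom', wildcardver=True)
+#   -> ('/home/user/meta-custom/recipes-foo/foo/foo_%.bbappend', True)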
+
+
+def bbappend_recipe(rd, destlayerdir, srcfiles, install=None, wildcardver=False, machine=None, extralines=None, removevalues=None):
+    """
+    Writes a bbappend file for a recipe
+    Parameters:
+        rd: data dictionary for the recipe
+        destlayerdir: base directory of the layer to place the bbappend in
+            (subdirectory path from there will be determined automatically)
+        srcfiles: dict of source files to add to SRC_URI, where the key
+            is the full path to the file to be added, and the value is the
+            original filename as it would appear in SRC_URI or None if it
+            isn't already present. You may pass None for this parameter if
+            you simply want to specify your own content via the extralines
+            parameter.
+        install: dict mapping entries in srcfiles to a tuple of two elements:
+            install path (*without* ${D} prefix) and permission value (as a
+            string, e.g. '0644').
+        wildcardver: True to use a % wildcard in the bbappend filename, or
+            False to make the bbappend specific to the recipe version.
+        machine:
+            If specified, make the changes in the bbappend specific to this
+            machine. This will also cause PACKAGE_ARCH = "${MACHINE_ARCH}"
+            to be added to the bbappend.
+        extralines:
+            Extra lines to add to the bbappend. This may be a dict of name
+            value pairs, or simply a list of the lines.
+        removevalues:
+            Variable values to remove - a dict of names/values.
+    """
+
+    if not removevalues:
+        removevalues = {}
+
+    # Determine how the bbappend should be named
+    appendpath, pathok = get_bbappend_path(rd, destlayerdir, wildcardver)
+    if not appendpath:
+        bb.error('Unable to determine layer directory containing %s' % rd.getVar('FILE', True))
+        return (None, None)
+    if not pathok:
+        bb.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.' % (os.path.join(destlayerdir, 'conf', 'layer.conf'), os.path.dirname(appendpath)))
+
+    appenddir = os.path.dirname(appendpath)
+    bb.utils.mkdirhier(appenddir)
+
+    # FIXME check if the bbappend doesn't get overridden by a higher priority layer?
+
+    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+    if not os.path.abspath(destlayerdir) in layerdirs:
+        bb.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
+
+    bbappendlines = []
+    if extralines:
+        if isinstance(extralines, dict):
+            for name, value in extralines.iteritems():
+                bbappendlines.append((name, '=', value))
+        else:
+            # Do our best to split it
+            for line in extralines:
+                if line[-1] == '\n':
+                    line = line[:-1]
+                splitline = line.split(None, 2)
+                if len(splitline) == 3:
+                    bbappendlines.append(tuple(splitline))
+                else:
+                    raise Exception('Invalid extralines value passed')
+
+    def popline(varname):
+        for i in xrange(0, len(bbappendlines)):
+            if bbappendlines[i][0] == varname:
+                line = bbappendlines.pop(i)
+                return line
+        return None
+
+    def appendline(varname, op, value):
+        for i in xrange(0, len(bbappendlines)):
+            item = bbappendlines[i]
+            if item[0] == varname:
+                bbappendlines[i] = (item[0], item[1], item[2] + ' ' + value)
+                break
+        else:
+            bbappendlines.append((varname, op, value))
+
+    destsubdir = rd.getVar('PN', True)
+    if srcfiles:
+        bbappendlines.append(('FILESEXTRAPATHS_prepend', ':=', '${THISDIR}/${PN}:'))
+
+    appendoverride = ''
+    if machine:
+        bbappendlines.append(('PACKAGE_ARCH', '=', '${MACHINE_ARCH}'))
+        appendoverride = '_%s' % machine
+    copyfiles = {}
+    if srcfiles:
+        instfunclines = []
+        for newfile, origsrcfile in srcfiles.iteritems():
+            srcfile = origsrcfile
+            srcurientry = None
+            if not srcfile:
+                srcfile = os.path.basename(newfile)
+                srcurientry = 'file://%s' % srcfile
+                # Double-check it's not there already
+                # FIXME do we care if the entry is added by another bbappend that might go away?
+                if not srcurientry in rd.getVar('SRC_URI', True).split():
+                    if machine:
+                        appendline('SRC_URI_append%s' % appendoverride, '=', ' ' + srcurientry)
+                    else:
+                        appendline('SRC_URI', '+=', srcurientry)
+            copyfiles[newfile] = srcfile
+            if install:
+                institem = install.pop(newfile, None)
+                if institem:
+                    (destpath, perms) = institem
+                    instdestpath = replace_dir_vars(destpath, rd)
+                    instdirline = 'install -d ${D}%s' % os.path.dirname(instdestpath)
+                    if not instdirline in instfunclines:
+                        instfunclines.append(instdirline)
+                    instfunclines.append('install -m %s ${WORKDIR}/%s ${D}%s' % (perms, os.path.basename(srcfile), instdestpath))
+        if instfunclines:
+            bbappendlines.append(('do_install_append%s()' % appendoverride, '', instfunclines))
+
+    bb.note('Writing append file %s' % appendpath)
+
+    if os.path.exists(appendpath):
+        # Work around lack of nonlocal in python 2
+        extvars = {'destsubdir': destsubdir}
+
+        def appendfile_varfunc(varname, origvalue, op, newlines):
+            if varname == 'FILESEXTRAPATHS_prepend':
+                if origvalue.startswith('${THISDIR}/'):
+                    popline('FILESEXTRAPATHS_prepend')
+                    extvars['destsubdir'] = rd.expand(origvalue.split('${THISDIR}/', 1)[1].rstrip(':'))
+            elif varname == 'PACKAGE_ARCH':
+                if machine:
+                    popline('PACKAGE_ARCH')
+                    return (machine, None, 4, False)
+            elif varname.startswith('do_install_append'):
+                func = popline(varname)
+                if func:
+                    instfunclines = [line.strip() for line in origvalue.strip('\n').splitlines()]
+                    for line in func[2]:
+                        if not line in instfunclines:
+                            instfunclines.append(line)
+                    return (instfunclines, None, 4, False)
+            else:
+                splitval = origvalue.split()
+                changed = False
+                removevar = varname
+                if varname in ['SRC_URI', 'SRC_URI_append%s' % appendoverride]:
+                    removevar = 'SRC_URI'
+                    line = popline(varname)
+                    if line:
+                        if line[2] not in splitval:
+                            splitval.append(line[2])
+                            changed = True
+                else:
+                    line = popline(varname)
+                    if line:
+                        splitval = [line[2]]
+                        changed = True
+
+                if removevar in removevalues:
+                    remove = removevalues[removevar]
+                    if isinstance(remove, basestring):
+                        if remove in splitval:
+                            splitval.remove(remove)
+                            changed = True
+                    else:
+                        for removeitem in remove:
+                            if removeitem in splitval:
+                                splitval.remove(removeitem)
+                                changed = True
+
+                if changed:
+                    newvalue = splitval
+                    if len(newvalue) == 1:
+                        # Ensure it's written out as one line
+                        if '_append' in varname:
+                            newvalue = ' ' + newvalue[0]
+                        else:
+                            newvalue = newvalue[0]
+                    if not newvalue and (op in ['+=', '.='] or '_append' in varname):
+                        # There's no point appending nothing
+                        newvalue = None
+                    if varname.endswith('()'):
+                        indent = 4
+                    else:
+                        indent = -1
+                    return (newvalue, None, indent, True)
+            return (origvalue, None, 4, False)
+
+        varnames = [item[0] for item in bbappendlines]
+        if removevalues:
+            varnames.extend(removevalues.keys())
+
+        with open(appendpath, 'r') as f:
+            (updated, newlines) = bb.utils.edit_metadata(f, varnames, appendfile_varfunc)
+
+        destsubdir = extvars['destsubdir']
+    else:
+        updated = False
+        newlines = []
+
+    if bbappendlines:
+        for line in bbappendlines:
+            if line[0].endswith('()'):
+                newlines.append('%s {\n    %s\n}\n' % (line[0], '\n    '.join(line[2])))
+            else:
+                newlines.append('%s %s "%s"\n\n' % line)
+        updated = True
+
+    if updated:
+        with open(appendpath, 'w') as f:
+            f.writelines(newlines)
+
+    if copyfiles:
+        if machine:
+            destsubdir = os.path.join(destsubdir, machine)
+        for newfile, srcfile in copyfiles.iteritems():
+            filedest = os.path.join(appenddir, destsubdir, os.path.basename(srcfile))
+            if os.path.abspath(newfile) != os.path.abspath(filedest):
+                bb.note('Copying %s to %s' % (newfile, filedest))
+                bb.utils.mkdirhier(os.path.dirname(filedest))
+                shutil.copyfile(newfile, filedest)
+
+    return (appendpath, os.path.join(appenddir, destsubdir))
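+
+# Illustrative usage sketch (hypothetical paths; rd is a parsed recipe
+# datastore):
+#
+#   bbappend_recipe(rd, '/home/user/meta-custom',
+#                   {'/home/user/fix.patch': None},
+#                   extralines={'PR': 'r1'})
+#   -> ('/home/user/meta-custom/recipes-foo/foo/foo_1.0.bbappend',
+#       '/home/user/meta-custom/recipes-foo/foo/foo')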
+
+
+def find_layerdir(fn):
+    """Figure out the base directory of the layer containing a file (e.g. a recipe), returning '' if none is found"""
+    pth = os.path.dirname(fn)
+    layerdir = ''
+    while pth:
+        if os.path.exists(os.path.join(pth, 'conf', 'layer.conf')):
+            layerdir = pth
+            break
+        pth = os.path.dirname(pth)
+    return layerdir
+
+
+def replace_dir_vars(path, d):
+    """Replace common directory paths with appropriate variable references (e.g. /etc becomes ${sysconfdir})"""
+    dirvars = {}
+    # Sort by length so we get the variables we're interested in first
+    for var in sorted(d.keys(), key=len):
+        if var.endswith('dir') and var.lower() == var:
+            value = d.getVar(var, True)
+            if value.startswith('/') and not '\n' in value and value not in dirvars:
+                dirvars[value] = var
+    for dirpath in sorted(dirvars.keys(), reverse=True):
+        path = path.replace(dirpath, '${%s}' % dirvars[dirpath])
+    return path
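+
+# Illustrative sketch (the actual substitutions depend on the datastore):
+#
+#   replace_dir_vars('/etc/default/foo', d)  -> '${sysconfdir}/default/foo'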
+
+def get_recipe_pv_without_srcpv(pv, uri_type):
+    """
+    Strip the SRCPV-derived portion from a PV value, as is common for
+    recipes fetched from an SCM; for now only git is handled specially.
+
+    Returns a tuple of (pv, prefix, suffix).
+    """
+    pfx = ''
+    sfx = ''
+
+    if uri_type == 'git':
+        git_regex = re.compile("(?P<pfx>v?)(?P<ver>[^\+]*)((?P<sfx>\+(git)?r?(AUTOINC\+))(?P<rev>.*))?")
+        m = git_regex.match(pv)
+
+        if m:
+            pv = m.group('ver')
+            pfx = m.group('pfx')
+            sfx = m.group('sfx')
+    else:
+        regex = re.compile("(?P<pfx>(v|r)?)(?P<ver>.*)")
+        m = regex.match(pv)
+        if m:
+            pv = m.group('ver')
+            pfx = m.group('pfx')
+
+    return (pv, pfx, sfx)
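+
+# Illustrative results (following the regexes above):
+#
+#   get_recipe_pv_without_srcpv('1.0+gitAUTOINC+f4de59', 'git')
+#   -> ('1.0', '', '+gitAUTOINC+')
+#   get_recipe_pv_without_srcpv('v2.3', 'http')
+#   -> ('2.3', 'v', '')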
+
+def get_recipe_upstream_version(rd):
+    """
+        Get the upstream version of a recipe using bb.fetch2 methods, with
+        support for http, https, ftp and git.
+
+        bb.fetch2 exceptions can be raised:
+            FetchError when there is no network access or the upstream site
+            doesn't respond.
+            NoMethodError when the URI's latest_versionstring method isn't
+            implemented.
+
+        Returns a dictionary with version, type and datetime.
+        Type can be A for Automatic, M for Manual and U for Unknown.
+    """
+    from bb.fetch2 import decodeurl
+    from datetime import datetime
+
+    ru = {}
+    ru['version'] = ''
+    ru['type'] = 'U'
+    ru['datetime'] = ''
+
+    # XXX: If there is no SRC_URI then there are no upstream sources, so
+    # return version 1.0 (tracked manually).
+    src_uris = rd.getVar('SRC_URI', True)
+    if not src_uris:
+        ru['version'] = '1.0'
+        ru['type'] = 'M'
+        ru['datetime'] = datetime.now()
+        return ru
+
+    # XXX: we assume that the first entry points to the upstream sources
+    src_uri = src_uris.split()[0]
+    uri_type, _, _, _, _, _ =  decodeurl(src_uri)
+
+    pv = rd.getVar('PV', True)
+
+    manual_upstream_version = rd.getVar("RECIPE_UPSTREAM_VERSION", True)
+    if manual_upstream_version:
+        # manual tracking of upstream version.
+        ru['version'] = manual_upstream_version
+        ru['type'] = 'M'
+
+        manual_upstream_date = rd.getVar("CHECK_DATE", True)
+        if manual_upstream_date:
+            date = datetime.strptime(manual_upstream_date, "%b %d, %Y")
+        else:
+            date = datetime.now()
+        ru['datetime'] = date
+
+    elif uri_type == "file":
+        # files are always up-to-date
+        ru['version'] =  pv
+        ru['type'] = 'A'
+        ru['datetime'] = datetime.now()
+    else:
+        ud = bb.fetch2.FetchData(src_uri, rd)
+        pupver = ud.method.latest_versionstring(ud, rd)
+        (upversion, revision) = pupver
+
+        # format the git version as <version>+gitAUTOINC+<hash>
+        if uri_type == 'git':
+            (pv, pfx, sfx) = get_recipe_pv_without_srcpv(pv, uri_type)
+
+            # if we got a revision but no version string, use the current PV
+            if upversion == '' and revision:
+                upversion = pv
+
+            if upversion:
+                if pfx:
+                    upversion = pfx + upversion
+                if sfx:
+                    upversion = upversion + sfx + revision[:10]
+
+        if upversion:
+            ru['version'] = upversion
+            ru['type'] = 'A'
+
+        ru['datetime'] = datetime.now()
+
+    return ru
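+
+# Illustrative result shape (values are hypothetical):
+#
+#   get_recipe_upstream_version(rd)
+#   -> {'version': '1.2', 'type': 'A', 'datetime': datetime.datetime(...)}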
diff --git a/meta/lib/oe/rootfs.py b/meta/lib/oe/rootfs.py
new file mode 100644
index 0000000..3b53fce
--- /dev/null
+++ b/meta/lib/oe/rootfs.py
@@ -0,0 +1,984 @@
+from abc import ABCMeta, abstractmethod
+from oe.utils import execute_pre_post_process
+from oe.package_manager import *
+from oe.manifest import *
+import oe.path
+import oe.cachedpath
+import filecmp
+import shutil
+import os
+import subprocess
+import re
+
+
+class Rootfs(object):
+    """
+    This is an abstract class. Do not instantiate this directly.
+    """
+    __metaclass__ = ABCMeta
+
+    def __init__(self, d):
+        self.d = d
+        self.pm = None
+        self.image_rootfs = self.d.getVar('IMAGE_ROOTFS', True)
+        self.deploy_dir_image = self.d.getVar('DEPLOY_DIR_IMAGE', True)
+
+        self.install_order = Manifest.INSTALL_ORDER
+
+    @abstractmethod
+    def _create(self):
+        pass
+
+    @abstractmethod
+    def _get_delayed_postinsts(self):
+        pass
+
+    @abstractmethod
+    def _save_postinsts(self):
+        pass
+
+    @abstractmethod
+    def _log_check(self):
+        pass
+
+    def _log_check_warn(self):
+        r = re.compile('^(warn|Warn|NOTE: warn|NOTE: Warn|WARNING:)')
+        log_path = self.d.expand("${T}/log.do_rootfs")
+        with open(log_path, 'r') as log:
+            for line in log:
+                if 'log_check' in line or 'NOTE:' in line:
+                    continue
+
+                m = r.search(line)
+                if m:
+                    bb.warn('[log_check] %s: found a warning message in the logfile (keyword \'%s\'):\n[log_check] %s'
+                            % (self.d.getVar('PN', True), m.group(), line))
+
+    def _log_check_error(self):
+        r = re.compile(self.log_check_regex)
+        log_path = self.d.expand("${T}/log.do_rootfs")
+        with open(log_path, 'r') as log:
+            found_error = 0
+            message = "\n"
+            for line in log:
+                if 'log_check' in line:
+                    continue
+
+                m = r.search(line)
+                if m:
+                    found_error = 1
+                    bb.warn('[log_check] %s: found an error message in the logfile (keyword \'%s\'):\n[log_check] %s'
+                            % (self.d.getVar('PN', True), m.group(), line))
+
+                if found_error >= 1 and found_error <= 5:
+                    message += line + '\n'
+                    found_error += 1
+
+                if found_error == 6:
+                    bb.fatal(message)
+
+    def _insert_feed_uris(self):
+        if bb.utils.contains("IMAGE_FEATURES", "package-management",
+                         True, False, self.d):
+            self.pm.insert_feeds_uris()
+
+    @abstractmethod
+    def _handle_intercept_failure(self, failed_script):
+        pass
+
+    """
+    The _cleanup() method should be used to clean up anything that we don't
+    really want to end up on the target. For example, in the case of RPM,
+    the DB locks. The method is called once, at the end of the create() method.
+    """
+    @abstractmethod
+    def _cleanup(self):
+        pass
+
+    def _setup_dbg_rootfs(self, dirs):
+        gen_debugfs = self.d.getVar('IMAGE_GEN_DEBUGFS', True) or '0'
+        if gen_debugfs != '1':
+            return
+
+        bb.note("  Renaming the original rootfs...")
+        try:
+            shutil.rmtree(self.image_rootfs + '-orig')
+        except:
+            pass
+        os.rename(self.image_rootfs, self.image_rootfs + '-orig')
+
+        bb.note("  Creating debug rootfs...")
+        bb.utils.mkdirhier(self.image_rootfs)
+
+        bb.note("  Copying back package database...")
+        for dir in dirs:
+            bb.utils.mkdirhier(self.image_rootfs + os.path.dirname(dir))
+            shutil.copytree(self.image_rootfs + '-orig' + dir, self.image_rootfs + dir)
+
+        cpath = oe.cachedpath.CachedPath()
+        # Copy files located in /usr/lib/debug or /usr/src/debug
+        for dir in ["/usr/lib/debug", "/usr/src/debug"]:
+            src = self.image_rootfs + '-orig' + dir
+            if cpath.exists(src):
+                dst = self.image_rootfs + dir
+                bb.utils.mkdirhier(os.path.dirname(dst))
+                shutil.copytree(src, dst)
+
+        # Copy files with suffix '.debug' or located in '.debug' dir.
+        for root, dirs, files in cpath.walk(self.image_rootfs + '-orig'):
+            relative_dir = root[len(self.image_rootfs + '-orig'):]
+            for f in files:
+                if f.endswith('.debug') or '/.debug' in relative_dir:
+                    bb.utils.mkdirhier(self.image_rootfs + relative_dir)
+                    shutil.copy(os.path.join(root, f),
+                                self.image_rootfs + relative_dir)
+
+        bb.note("  Installing complementary '*-dbg' packages...")
+        self.pm.install_complementary('*-dbg')
+
+        bb.note("  Renaming debug rootfs...")
+        try:
+            shutil.rmtree(self.image_rootfs + '-dbg')
+        except:
+            pass
+        os.rename(self.image_rootfs, self.image_rootfs + '-dbg')
+
+        bb.note("  Restoring original rootfs...")
+        os.rename(self.image_rootfs + '-orig', self.image_rootfs)
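+
+    # The debug rootfs pass is driven by IMAGE_GEN_DEBUGFS; a minimal
+    # configuration sketch for local.conf:
+    #   IMAGE_GEN_DEBUGFS = "1"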
+
+    def _exec_shell_cmd(self, cmd):
+        fakerootcmd = self.d.getVar('FAKEROOT', True)
+        if fakerootcmd is not None:
+            exec_cmd = [fakerootcmd, cmd]
+        else:
+            exec_cmd = cmd
+
+        try:
+            subprocess.check_output(exec_cmd, stderr=subprocess.STDOUT)
+        except subprocess.CalledProcessError as e:
+            return "Command '%s' returned %d:\n%s" % (e.cmd, e.returncode, e.output)
+
+        return None
+
+    def create(self):
+        bb.note("###### Generate rootfs #######")
+        pre_process_cmds = self.d.getVar("ROOTFS_PREPROCESS_COMMAND", True)
+        post_process_cmds = self.d.getVar("ROOTFS_POSTPROCESS_COMMAND", True)
+
+        postinst_intercepts_dir = self.d.getVar("POSTINST_INTERCEPTS_DIR", True)
+        if not postinst_intercepts_dir:
+            postinst_intercepts_dir = self.d.expand("${COREBASE}/scripts/postinst-intercepts")
+        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
+                                      "intercept_scripts")
+
+        bb.utils.remove(intercepts_dir, True)
+
+        bb.utils.mkdirhier(self.image_rootfs)
+
+        bb.utils.mkdirhier(self.deploy_dir_image)
+
+        shutil.copytree(postinst_intercepts_dir, intercepts_dir)
+
+        shutil.copy(self.d.expand("${COREBASE}/meta/files/deploydir_readme.txt"),
+                    self.deploy_dir_image +
+                    "/README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt")
+
+        execute_pre_post_process(self.d, pre_process_cmds)
+
+        # call the package manager dependent create method
+        self._create()
+
+        sysconfdir = self.image_rootfs + self.d.getVar('sysconfdir', True)
+        bb.utils.mkdirhier(sysconfdir)
+        with open(sysconfdir + "/version", "w+") as ver:
+            ver.write(self.d.getVar('BUILDNAME', True) + "\n")
+
+        self._run_intercepts()
+
+        execute_pre_post_process(self.d, post_process_cmds)
+
+        if bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+                         True, False, self.d):
+            delayed_postinsts = self._get_delayed_postinsts()
+            if delayed_postinsts is not None:
+                bb.fatal("The following packages could not be configured "
+                         "offline and rootfs is read-only: %s" %
+                         delayed_postinsts)
+
+        if self.d.getVar('USE_DEVFS', True) != "1":
+            self._create_devfs()
+
+        self._uninstall_unneeded()
+
+        self._insert_feed_uris()
+
+        self._run_ldconfig()
+
+        if self.d.getVar('USE_DEPMOD', True) != "0":
+            self._generate_kernel_module_deps()
+
+        self._cleanup()
+        self._log_check()
+
+    def _uninstall_unneeded(self):
+        # Remove unneeded init script symlinks
+        delayed_postinsts = self._get_delayed_postinsts()
+        if delayed_postinsts is None:
+            if os.path.exists(self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/init.d/run-postinsts")):
+                self._exec_shell_cmd(["update-rc.d", "-f", "-r",
+                                      self.d.getVar('IMAGE_ROOTFS', True),
+                                      "run-postinsts", "remove"])
+
+        runtime_pkgmanage = bb.utils.contains("IMAGE_FEATURES", "package-management",
+                         True, False, self.d)
+        sysvcompat_in_distro = bb.utils.contains("DISTRO_FEATURES", [ "systemd", "sysvinit" ],
+                         True, False, self.d)
+        image_rorfs = bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs",
+                         True, False, self.d)
+        if sysvcompat_in_distro and not image_rorfs:
+            pkg_to_remove = ""
+        else:
+            pkg_to_remove = "update-rc.d"
+        if not runtime_pkgmanage:
+            # Remove components that we don't need if we're not going to install
+            # additional packages at runtime
+            if delayed_postinsts is None:
+                installed_pkgs_dir = self.d.expand('${WORKDIR}/installed_pkgs.txt')
+                pkgs_to_remove = list()
+                with open(installed_pkgs_dir, "r+") as installed_pkgs:
+                    pkgs_installed = installed_pkgs.read().splitlines()
+                    for pkg_installed in pkgs_installed[:]:
+                        pkg = pkg_installed.split()[0]
+                        if pkg in ["update-rc.d",
+                                "base-passwd",
+                                "shadow",
+                                "update-alternatives", pkg_to_remove,
+                                self.d.getVar("ROOTFS_BOOTSTRAP_INSTALL", True)
+                                ]:
+                            pkgs_to_remove.append(pkg)
+                            pkgs_installed.remove(pkg_installed)
+
+                if len(pkgs_to_remove) > 0:
+                    self.pm.remove(pkgs_to_remove, False)
+                    # Update installed_pkgs.txt
+                    with open(installed_pkgs_dir, "w+") as installed_pkgs:
+                        installed_pkgs.write('\n'.join(pkgs_installed))
+
+            else:
+                self._save_postinsts()
+
+        post_uninstall_cmds = self.d.getVar("ROOTFS_POSTUNINSTALL_COMMAND", True)
+        execute_pre_post_process(self.d, post_uninstall_cmds)
+
+        if not runtime_pkgmanage:
+            # Remove the package manager data files
+            self.pm.remove_packaging_data()
+
+    def _run_intercepts(self):
+        intercepts_dir = os.path.join(self.d.getVar('WORKDIR', True),
+                                      "intercept_scripts")
+
+        bb.note("Running intercept scripts:")
+        os.environ['D'] = self.image_rootfs
+        for script in os.listdir(intercepts_dir):
+            script_full = os.path.join(intercepts_dir, script)
+
+            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
+                continue
+
+            bb.note("> Executing %s intercept ..." % script)
+
+            try:
+                subprocess.check_call(script_full)
+            except subprocess.CalledProcessError as e:
+                bb.warn("The postinstall intercept hook '%s' failed (exit code: %d)! See log for details!" %
+                        (script, e.returncode))
+
+                with open(script_full) as intercept:
+                    registered_pkgs = None
+                    for line in intercept.read().split("\n"):
+                        m = re.match("^##PKGS:(.*)", line)
+                        if m is not None:
+                            registered_pkgs = m.group(1).strip()
+                            break
+
+                    if registered_pkgs is not None:
+                        bb.warn("The postinstalls for the following packages "
+                                "will be postponed for first boot: %s" %
+                                registered_pkgs)
+
+                        # call the backend dependent handler
+                        self._handle_intercept_failure(registered_pkgs)
+
+    def _run_ldconfig(self):
+        if self.d.getVar('LDCONFIGDEPEND', True):
+            bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v")
+            self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c',
+                                  'new', '-v'])
+
+    def _check_for_kernel_modules(self, modules_dir):
+        for root, dirs, files in os.walk(modules_dir, topdown=True):
+            for name in files:
+                if name.endswith(".ko"):
+                    return True
+        return False
+
+    def _generate_kernel_module_deps(self):
+        modules_dir = os.path.join(self.image_rootfs, 'lib', 'modules')
+        # if we don't have any modules don't bother to do the depmod
+        if not self._check_for_kernel_modules(modules_dir):
+            bb.note("No Kernel Modules found, not running depmod")
+            return
+
+        kernel_abi_ver_file = oe.path.join(self.d.getVar('PKGDATA_DIR', True), "kernel-depmod",
+                                           'kernel-abiversion')
+        if not os.path.exists(kernel_abi_ver_file):
+            bb.fatal("No kernel-abiversion file found (%s), cannot run depmod, aborting" % kernel_abi_ver_file)
+
+        kernel_ver = open(kernel_abi_ver_file).read().strip(' \n')
+        # modules_dir is already rooted at image_rootfs
+        versioned_modules_dir = os.path.join(modules_dir, kernel_ver)
+
+        bb.utils.mkdirhier(versioned_modules_dir)
+
+        self._exec_shell_cmd(['depmodwrapper', '-a', '-b', self.image_rootfs, kernel_ver])
+
+    """
+    Create devfs:
+    * IMAGE_DEVICE_TABLE is the old way: an absolute path to a device table file
+    * IMAGE_DEVICE_TABLES is the new way: a file, or list of files, searched
+      for in BBPATH
+    If neither is specified then the default of files/device_table-minimal.txt
+    is searched for in BBPATH (the same as the old version.)
+    """
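+    # Configuration sketch (illustrative value; the file is resolved via BBPATH):
+    #   IMAGE_DEVICE_TABLES = "files/device_table-minimal.txt"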
+    def _create_devfs(self):
+        devtable_list = []
+        devtable = self.d.getVar('IMAGE_DEVICE_TABLE', True)
+        if devtable is not None:
+            devtable_list.append(devtable)
+        else:
+            devtables = self.d.getVar('IMAGE_DEVICE_TABLES', True)
+            if devtables is None:
+                devtables = 'files/device_table-minimal.txt'
+            for devtable in devtables.split():
+                devtable_list.append("%s" % bb.utils.which(self.d.getVar('BBPATH', True), devtable))
+
+        for devtable in devtable_list:
+            self._exec_shell_cmd(["makedevs", "-r",
+                                  self.image_rootfs, "-D", devtable])
+
+
+class RpmRootfs(Rootfs):
+    def __init__(self, d, manifest_dir):
+        super(RpmRootfs, self).__init__(d)
+        self.log_check_regex = '(unpacking of archive failed|Cannot find package'\
+                               '|exit 1|ERROR: |Error: |Error |ERROR '\
+                               '|Failed |Failed: |Failed$|Failed\(\d+\):)'
+        self.manifest = RpmManifest(d, manifest_dir)
+
+        self.pm = RpmPM(d,
+                        d.getVar('IMAGE_ROOTFS', True),
+                        self.d.getVar('TARGET_VENDOR', True)
+                        )
+
+        self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN', True)
+        if self.inc_rpm_image_gen != "1":
+            bb.utils.remove(self.image_rootfs, True)
+        else:
+            self.pm.recovery_packaging_data()
+        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+
+        self.pm.create_configs()
+
+    '''
+    When rpm incremental image generation is enabled, this removes
+    unneeded pkgs by comparing the new install solution manifest with the
+    previously installed manifest.
+    '''
+    def _create_incremental(self, pkgs_initial_install):
+        if self.inc_rpm_image_gen == "1":
+
+            pkgs_to_install = list()
+            for pkg_type in pkgs_initial_install:
+                pkgs_to_install += pkgs_initial_install[pkg_type]
+
+            installed_manifest = self.pm.load_old_install_solution()
+            solution_manifest = self.pm.dump_install_solution(pkgs_to_install)
+
+            pkg_to_remove = list()
+            for pkg in installed_manifest:
+                if pkg not in solution_manifest:
+                    pkg_to_remove.append(pkg)
+
+            self.pm.update()
+
+            bb.note('incremental update -- upgrade packages in place ')
+            self.pm.upgrade()
+            if pkg_to_remove != []:
+                bb.note('incremental removed: %s' % ' '.join(pkg_to_remove))
+                self.pm.remove(pkg_to_remove)
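+
+    # Incremental generation is driven by INC_RPM_IMAGE_GEN; a minimal
+    # configuration sketch for local.conf:
+    #   INC_RPM_IMAGE_GEN = "1"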
+
+    def _create(self):
+        pkgs_to_install = self.manifest.parse_initial_manifest()
+        rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS', True)
+        rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS', True)
+
+        # update PM index files
+        self.pm.write_index()
+
+        execute_pre_post_process(self.d, rpm_pre_process_cmds)
+
+        self.pm.dump_all_available_pkgs()
+
+        if self.inc_rpm_image_gen == "1":
+            self._create_incremental(pkgs_to_install)
+
+        self.pm.update()
+
+        pkgs = []
+        pkgs_attempt = []
+        for pkg_type in pkgs_to_install:
+            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+                pkgs_attempt += pkgs_to_install[pkg_type]
+            else:
+                pkgs += pkgs_to_install[pkg_type]
+
+        self.pm.install(pkgs)
+
+        self.pm.install(pkgs_attempt, True)
+
+        self.pm.install_complementary()
+
+        self._setup_dbg_rootfs(['/etc/rpm', '/var/lib/rpm', '/var/lib/smart'])
+
+        execute_pre_post_process(self.d, rpm_post_process_cmds)
+
+        self._log_check()
+
+        if self.inc_rpm_image_gen == "1":
+            self.pm.backup_packaging_data()
+
+        self.pm.rpm_setup_smart_target_config()
+
+    @staticmethod
+    def _depends_list():
+        return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS',
+                'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH']
+
+    def _get_delayed_postinsts(self):
+        postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts")
+        if os.path.isdir(postinst_dir):
+            files = os.listdir(postinst_dir)
+            for f in files:
+                bb.note('Delayed package scriptlet: %s' % f)
+            return files
+
+        return None
+
+    def _save_postinsts(self):
+        # this is just a stub. For RPM, the failed postinstalls are
+        # already saved in /etc/rpm-postinsts
+        pass
+
+    def _log_check_error(self):
+        r = re.compile('(unpacking of archive failed|Cannot find package|exit 1|ERR|Fail)')
+        log_path = self.d.expand("${T}/log.do_rootfs")
+        with open(log_path, 'r') as log:
+            found_error = 0
+            message = "\n"
+            for line in log.read().split('\n'):
+                if 'log_check' in line:
+                    continue
+                # sh -x may emit code which isn't actually executed
+                if line.startswith('+'):
+                    continue
+
+                m = r.search(line)
+                if m:
+                    found_error = 1
+                    bb.warn('log_check: There were error messages in the logfile')
+                    bb.warn('log_check: Matched keyword: [%s]\n\n' % m.group())
+
+                if found_error >= 1 and found_error <= 5:
+                    message += line + '\n'
+                    found_error += 1
+
+                if found_error == 6:
+                    bb.fatal(message)
+
+    def _log_check(self):
+        self._log_check_warn()
+        self._log_check_error()
+
+    def _handle_intercept_failure(self, registered_pkgs):
+        rpm_postinsts_dir = self.image_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/')
+        bb.utils.mkdirhier(rpm_postinsts_dir)
+
+        # Save the package postinstalls in /etc/rpm-postinsts
+        for pkg in registered_pkgs.split():
+            self.pm.save_rpmpostinst(pkg)
+
+    def _cleanup(self):
+        # during the execution of postprocess commands, rpm is called several
+        # times to get the files installed, dependencies, etc. This creates the
+        # __db.00* (Berkeley DB files that hold locks, rpm specific environment
+        # settings, etc.), that should not get into the final rootfs
+        self.pm.unlock_rpm_db()
+        if os.path.isdir(self.pm.install_dir_path + "/tmp") and not os.listdir(self.pm.install_dir_path + "/tmp"):
+            bb.utils.remove(self.pm.install_dir_path + "/tmp", True)
+        if os.path.isdir(self.pm.install_dir_path) and not os.listdir(self.pm.install_dir_path):
+            bb.utils.remove(self.pm.install_dir_path, True)
+
+class DpkgOpkgRootfs(Rootfs):
+    def __init__(self, d):
+        super(DpkgOpkgRootfs, self).__init__(d)
+
+    def _get_pkgs_postinsts(self, status_file):
+        def _get_pkg_depends_list(pkg_depends):
+            pkg_depends_list = []
+            # filter version requirements like libc (>= 1.1)
+            for dep in pkg_depends.split(', '):
+                m_dep = re.match("^(.*) \(.*\)$", dep)
+                if m_dep:
+                    dep = m_dep.group(1)
+                pkg_depends_list.append(dep)
+
+            return pkg_depends_list
+
+        pkgs = {}
+        pkg_name = ""
+        pkg_status_match = False
+        pkg_depends = ""
+
+        with open(status_file) as status:
+            data = status.read()
+            for line in data.split('\n'):
+                m_pkg = re.match("^Package: (.*)", line)
+                m_status = re.match("^Status:.*unpacked", line)
+                m_depends = re.match("^Depends: (.*)", line)
+
+                if m_pkg is not None:
+                    if pkg_name and pkg_status_match:
+                        pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends)
+
+                    pkg_name = m_pkg.group(1)
+                    pkg_status_match = False
+                    pkg_depends = ""
+                elif m_status is not None:
+                    pkg_status_match = True
+                elif m_depends is not None:
+                    pkg_depends = m_depends.group(1)
+
+        # remove package dependencies not in postinsts
+        pkg_names = pkgs.keys()
+        for pkg_name in pkg_names:
+            deps = pkgs[pkg_name][:]
+
+            for d in deps:
+                if d not in pkg_names:
+                    pkgs[pkg_name].remove(d)
+
+        return pkgs
+
+    def _get_delayed_postinsts_common(self, status_file):
+        def _dep_resolve(graph, node, resolved, seen):
+            seen.append(node)
+
+            for edge in graph[node]:
+                if edge not in resolved:
+                    if edge in seen:
+                        raise RuntimeError("Packages %s and %s have " \
+                                "a circular dependency in postinsts scripts." \
+                                % (node, edge))
+                    _dep_resolve(graph, edge, resolved, seen)
+
+            resolved.append(node)
+
+        pkg_list = []
+
+        pkgs = self._get_pkgs_postinsts(status_file)
+        if pkgs:
+            root = "__packagegroup_postinst__"
+            pkgs[root] = pkgs.keys()
+            _dep_resolve(pkgs, root, pkg_list, [])
+            pkg_list.remove(root)
+
+        if len(pkg_list) == 0:
+            return None
+
+        return pkg_list
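+
+    # Illustrative sketch of the resolution order (hypothetical data):
+    #
+    #   pkgs = {'a': ['b'], 'b': []} plus the synthetic root node
+    #   -> pkg_list == ['b', 'a']   (dependencies before dependents)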
+
+    def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir):
+        num = 0
+        for p in self._get_delayed_postinsts():
+            bb.utils.mkdirhier(dst_postinst_dir)
+
+            if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")):
+                shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"),
+                            os.path.join(dst_postinst_dir, "%03d-%s" % (num, p)))
+
+            num += 1
+
+class DpkgRootfs(DpkgOpkgRootfs):
+    def __init__(self, d, manifest_dir):
+        super(DpkgRootfs, self).__init__(d)
+        self.log_check_regex = '^E:'
+
+        bb.utils.remove(self.image_rootfs, True)
+        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+        self.manifest = DpkgManifest(d, manifest_dir)
+        self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS', True),
+                         d.getVar('PACKAGE_ARCHS', True),
+                         d.getVar('DPKG_ARCH', True))
+
+
+    def _create(self):
+        pkgs_to_install = self.manifest.parse_initial_manifest()
+        deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS', True)
+        deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS', True)
+
+        alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives")
+        bb.utils.mkdirhier(alt_dir)
+
+        # update PM index files
+        self.pm.write_index()
+
+        execute_pre_post_process(self.d, deb_pre_process_cmds)
+
+        self.pm.update()
+
+        for pkg_type in self.install_order:
+            if pkg_type in pkgs_to_install:
+                self.pm.install(pkgs_to_install[pkg_type],
+                                pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY)
+
+        self.pm.install_complementary()
+
+        self._setup_dbg_rootfs(['/var/lib/dpkg'])
+
+        self.pm.fix_broken_dependencies()
+
+        self.pm.mark_packages("installed")
+
+        self.pm.run_pre_post_installs()
+
+        execute_pre_post_process(self.d, deb_post_process_cmds)
+
+    @staticmethod
+    def _depends_list():
+        return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS']
+
+    def _get_delayed_postinsts(self):
+        status_file = self.image_rootfs + "/var/lib/dpkg/status"
+        return self._get_delayed_postinsts_common(status_file)
+
+    def _save_postinsts(self):
+        dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts")
+        src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info")
+        return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+    def _handle_intercept_failure(self, registered_pkgs):
+        self.pm.mark_packages("unpacked", registered_pkgs.split())
+
+    def _log_check(self):
+        self._log_check_warn()
+        self._log_check_error()
+
+    def _cleanup(self):
+        pass
+
+
+class OpkgRootfs(DpkgOpkgRootfs):
+    def __init__(self, d, manifest_dir):
+        super(OpkgRootfs, self).__init__(d)
+        self.log_check_regex = '(exit 1|Collected errors)'
+
+        self.manifest = OpkgManifest(d, manifest_dir)
+        self.opkg_conf = self.d.getVar("IPKGCONF_TARGET", True)
+        self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True)
+
+        self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN', True) or ""
+        remove_old_rootfs = self._remove_old_rootfs()
+        if remove_old_rootfs:
+            bb.utils.remove(self.image_rootfs, True)
+
+        self.pm = OpkgPM(d,
+                         self.image_rootfs,
+                         self.opkg_conf,
+                         self.pkg_archs)
+
+        if not remove_old_rootfs:
+            self.pm.recover_packaging_data()
+
+        bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS', True), True)
+
+    def _prelink_file(self, root_dir, filename):
+        bb.note('prelink %s in %s' % (filename, root_dir))
+        prelink_cfg = oe.path.join(root_dir,
+                                   self.d.expand('${sysconfdir}/prelink.conf'))
+        if not os.path.exists(prelink_cfg):
+            shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'),
+                        prelink_cfg)
+
+        cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink')
+        self._exec_shell_cmd([cmd_prelink,
+                              '--root',
+                              root_dir,
+                              '-amR',
+                              '-N',
+                              '-c',
+                              self.d.expand('${sysconfdir}/prelink.conf')])
+
+    '''
+    Compare two files that share the same key to see if they are equal.
+    If they are not equal, they are duplicates that come from different
+    packages.
+    1st: Compare them directly.
+    2nd: When incremental image creation is enabled, one of the files
+         could have been prelinked during a previous image creation and
+         thus changed, so we prelink the other one as well and compare
+         them again.
+    '''
+    def _file_equal(self, key, f1, f2):
+
+        # Both of them are not prelinked
+        if filecmp.cmp(f1, f2):
+            return True
+
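+        # f1/f2 are full paths and key is the path relative to a rootfs, so
+        # stripping key from the path yields the rootfs the file belongs to,
+        # e.g. (hypothetical paths) f1 = "<ml_temp>/lib32/usr/lib/libfoo.so"
+        # with key = "/usr/lib/libfoo.so" gives root_dir = "<ml_temp>/lib32".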
+        if self.image_rootfs not in f1:
+            self._prelink_file(f1.replace(key, ''), f1)
+
+        if self.image_rootfs not in f2:
+            self._prelink_file(f2.replace(key, ''), f2)
+
+        # Both of them are prelinked
+        if filecmp.cmp(f1, f2):
+            return True
+
+        # Not equal
+        return False
+
+    """
+    This function was reused from the old implementation.
+    See commit: "image.bbclass: Added variables for multilib support." by
+    Lianhao Lu.
+    """
+    def _multilib_sanity_test(self, dirs):
+
+        allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP", True) or ""
+
+        allow_rep = re.compile(re.sub(r"\|$", "", allow_replace))
+        error_prompt = "Multilib check error:"
+
+        files = {}
+        for dir in dirs:
+            for root, subfolders, subfiles in os.walk(dir):
+                for file in subfiles:
+                    item = os.path.join(root, file)
+                    key = str(os.path.join("/", os.path.relpath(item, dir)))
+
+                    valid = True
+                    if key in files:
+                        # check whether the file is allowed to be replaced
+                        if allow_rep.match(key):
+                            valid = True
+                        else:
+                            if os.path.exists(files[key]) and \
+                               os.path.exists(item) and \
+                               not self._file_equal(key, files[key], item):
+                                valid = False
+                                bb.fatal("%s duplicate files %s and %s are not the same\n" %
+                                         (error_prompt, item, files[key]))
+
+                    # passed the check, add it to the list
+                    if valid:
+                        files[key] = item
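+
+    # A hedged example of the allow-list above (hypothetical value): with
+    # MULTILIBRE_ALLOW_REP = "/usr/bin/python|/usr/share/doc", the compiled
+    # allow_rep pattern matches those keys, so a path installed by both the
+    # base and a multilib variant is silently replaced instead of triggering
+    # the duplicate-file fatal error.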
+
+    def _multilib_test_install(self, pkgs):
+        ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS", True)
+        bb.utils.mkdirhier(ml_temp)
+
+        dirs = [self.image_rootfs]
+
+        for variant in self.d.getVar("MULTILIB_VARIANTS", True).split():
+            ml_target_rootfs = os.path.join(ml_temp, variant)
+
+            bb.utils.remove(ml_target_rootfs, True)
+
+            ml_opkg_conf = os.path.join(ml_temp,
+                                        variant + "-" + os.path.basename(self.opkg_conf))
+
+            ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs)
+
+            ml_pm.update()
+            ml_pm.install(pkgs)
+
+            dirs.append(ml_target_rootfs)
+
+        self._multilib_sanity_test(dirs)
+
+    '''
+    When ipk incremental image generation is enabled, remove the packages
+    that are no longer needed by comparing the full manifest from the
+    previous existing image with the full manifest of the current image.
+    '''
+    def _remove_extra_packages(self, pkgs_initial_install):
+        if self.inc_opkg_image_gen == "1":
+            # Parse full manifest in previous existing image creation session
+            old_full_manifest = self.manifest.parse_full_manifest()
+
+            # Create full manifest for the current image session, the old one
+            # will be replaced by the new one.
+            self.manifest.create_full(self.pm)
+
+            # Parse full manifest in current image creation session
+            new_full_manifest = self.manifest.parse_full_manifest()
+
+            pkg_to_remove = list()
+            for pkg in old_full_manifest:
+                if pkg not in new_full_manifest:
+                    pkg_to_remove.append(pkg)
+
+            if pkg_to_remove:
+                bb.note('incremental removal: %s' % ' '.join(pkg_to_remove))
+                self.pm.remove(pkg_to_remove)
+
+    '''
+    Compare with the previous image creation session; if certain
+    conditions are triggered, the previously built image should be
+    removed. The conditions are met when any of PACKAGE_EXCLUDE,
+    NO_RECOMMENDATIONS or BAD_RECOMMENDATIONS has changed.
+    '''
+    def _remove_old_rootfs(self):
+        if self.inc_opkg_image_gen != "1":
+            return True
+
+        vars_list_file = self.d.expand('${T}/vars_list')
+
+        old_vars_list = ""
+        if os.path.exists(vars_list_file):
+            old_vars_list = open(vars_list_file, 'r+').read()
+
+        new_vars_list = '%s:%s:%s\n' % \
+                ((self.d.getVar('BAD_RECOMMENDATIONS', True) or '').strip(),
+                 (self.d.getVar('NO_RECOMMENDATIONS', True) or '').strip(),
+                 (self.d.getVar('PACKAGE_EXCLUDE', True) or '').strip())
+        with open(vars_list_file, 'w') as f:
+            f.write(new_vars_list)
+
+        if old_vars_list != new_vars_list:
+            return True
+
+        return False
+
+    def _create(self):
+        pkgs_to_install = self.manifest.parse_initial_manifest()
+        opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS', True)
+        opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS', True)
+        rootfs_post_install_cmds = self.d.getVar('ROOTFS_POSTINSTALL_COMMAND', True)
+
+        # update PM index files, unless users provide their own feeds
+        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+            self.pm.write_index()
+
+        execute_pre_post_process(self.d, opkg_pre_process_cmds)
+
+        self.pm.update()
+
+        self.pm.handle_bad_recommendations()
+
+        if self.inc_opkg_image_gen == "1":
+            self._remove_extra_packages(pkgs_to_install)
+
+        for pkg_type in self.install_order:
+            if pkg_type in pkgs_to_install:
+                # For multilib, we perform a sanity test before final install
+                # If sanity test fails, it will automatically do a bb.fatal()
+                # and the installation will stop
+                if pkg_type == Manifest.PKG_TYPE_MULTILIB:
+                    self._multilib_test_install(pkgs_to_install[pkg_type])
+
+                self.pm.install(pkgs_to_install[pkg_type],
+                                pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY)
+
+        self.pm.install_complementary()
+
+        self._setup_dbg_rootfs(['/var/lib/opkg'])
+
+        execute_pre_post_process(self.d, opkg_post_process_cmds)
+        execute_pre_post_process(self.d, rootfs_post_install_cmds)
+
+        if self.inc_opkg_image_gen == "1":
+            self.pm.backup_packaging_data()
+
+    @staticmethod
+    def _depends_list():
+        return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR']
+
+    def _get_delayed_postinsts(self):
+        status_file = os.path.join(self.image_rootfs,
+                                   self.d.getVar('OPKGLIBDIR', True).strip('/'),
+                                   "opkg", "status")
+        return self._get_delayed_postinsts_common(status_file)
+
+    def _save_postinsts(self):
+        dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts")
+        src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info")
+        return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir)
+
+    def _handle_intercept_failure(self, registered_pkgs):
+        self.pm.mark_packages("unpacked", registered_pkgs.split())
+
+    def _log_check(self):
+        self._log_check_warn()
+        self._log_check_error()
+
+    def _cleanup(self):
+        pass
+
+def get_class_for_type(imgtype):
+    return {"rpm": RpmRootfs,
+            "ipk": OpkgRootfs,
+            "deb": DpkgRootfs}[imgtype]
+
+def variable_depends(d, manifest_dir=None):
+    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    cls = get_class_for_type(img_type)
+    return cls._depends_list()
+
+def create_rootfs(d, manifest_dir=None):
+    env_bkp = os.environ.copy()
+
+    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    if img_type == "rpm":
+        RpmRootfs(d, manifest_dir).create()
+    elif img_type == "ipk":
+        OpkgRootfs(d, manifest_dir).create()
+    elif img_type == "deb":
+        DpkgRootfs(d, manifest_dir).create()
+
+    os.environ.clear()
+    os.environ.update(env_bkp)
+
+
+def image_list_installed_packages(d, format=None, rootfs_dir=None):
+    if not rootfs_dir:
+        rootfs_dir = d.getVar('IMAGE_ROOTFS', True)
+
+    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    if img_type == "rpm":
+        return RpmPkgsList(d, rootfs_dir).list(format)
+    elif img_type == "ipk":
+        return OpkgPkgsList(d, rootfs_dir, d.getVar("IPKGCONF_TARGET", True)).list(format)
+    elif img_type == "deb":
+        return DpkgPkgsList(d, rootfs_dir).list(format)
+
+if __name__ == "__main__":
+    """
+    We should be able to run this as a standalone script, from outside bitbake
+    environment.
+    """
+    """
+    TBD
+    """
diff --git a/meta/lib/oe/sdk.py b/meta/lib/oe/sdk.py
new file mode 100644
index 0000000..53da0f0
--- /dev/null
+++ b/meta/lib/oe/sdk.py
@@ -0,0 +1,349 @@
+from abc import ABCMeta, abstractmethod
+from oe.utils import execute_pre_post_process
+from oe.manifest import *
+from oe.package_manager import *
+import os
+import shutil
+import glob
+
+
+class Sdk(object):
+    __metaclass__ = ABCMeta
+
+    def __init__(self, d, manifest_dir):
+        self.d = d
+        self.sdk_output = self.d.getVar('SDK_OUTPUT', True)
+        self.sdk_native_path = self.d.getVar('SDKPATHNATIVE', True).strip('/')
+        self.target_path = self.d.getVar('SDKTARGETSYSROOT', True).strip('/')
+        self.sysconfdir = self.d.getVar('sysconfdir', True).strip('/')
+
+        self.sdk_target_sysroot = os.path.join(self.sdk_output, self.target_path)
+        self.sdk_host_sysroot = self.sdk_output
+
+        if manifest_dir is None:
+            self.manifest_dir = self.d.getVar("SDK_DIR", True)
+        else:
+            self.manifest_dir = manifest_dir
+
+        bb.utils.remove(self.sdk_output, True)
+
+        self.install_order = Manifest.INSTALL_ORDER
+
+    @abstractmethod
+    def _populate(self):
+        pass
+
+    def populate(self):
+        bb.utils.mkdirhier(self.sdk_output)
+
+        # call backend dependent implementation
+        self._populate()
+
+        # Don't ship any libGL in the SDK
+        bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+                                     self.d.getVar('libdir_nativesdk', True).strip('/'),
+                                     "libGL*"))
+
+        # Fix or remove broken .la files
+        bb.utils.remove(os.path.join(self.sdk_output, self.sdk_native_path,
+                                     self.d.getVar('libdir_nativesdk', True).strip('/'),
+                                     "*.la"))
+
+        # Link the ld.so.cache file into the hosts filesystem
+        link_name = os.path.join(self.sdk_output, self.sdk_native_path,
+                                 self.sysconfdir, "ld.so.cache")
+        bb.utils.mkdirhier(os.path.dirname(link_name))
+        os.symlink("/etc/ld.so.cache", link_name)
+
+        execute_pre_post_process(self.d, self.d.getVar('SDK_POSTPROCESS_COMMAND', True))
+
+
+class RpmSdk(Sdk):
+    def __init__(self, d, manifest_dir=None):
+        super(RpmSdk, self).__init__(d, manifest_dir)
+
+        self.target_manifest = RpmManifest(d, self.manifest_dir,
+                                           Manifest.MANIFEST_TYPE_SDK_TARGET)
+        self.host_manifest = RpmManifest(d, self.manifest_dir,
+                                         Manifest.MANIFEST_TYPE_SDK_HOST)
+
+        target_providename = ['/bin/sh',
+                              '/bin/bash',
+                              '/usr/bin/env',
+                              '/usr/bin/perl',
+                              'pkgconfig'
+                              ]
+
+        self.target_pm = RpmPM(d,
+                               self.sdk_target_sysroot,
+                               self.d.getVar('TARGET_VENDOR', True),
+                               'target',
+                               target_providename
+                               )
+
+        sdk_providename = ['/bin/sh',
+                           '/bin/bash',
+                           '/usr/bin/env',
+                           '/usr/bin/perl',
+                           'pkgconfig',
+                           'libGL.so()(64bit)',
+                           'libGL.so'
+                           ]
+
+        self.host_pm = RpmPM(d,
+                             self.sdk_host_sysroot,
+                             self.d.getVar('SDK_VENDOR', True),
+                             'host',
+                             sdk_providename,
+                             "SDK_PACKAGE_ARCHS",
+                             "SDK_OS"
+                             )
+
+    def _populate_sysroot(self, pm, manifest):
+        pkgs_to_install = manifest.parse_initial_manifest()
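+        # pkgs_to_install maps a Manifest.PKG_TYPE_* constant to a list of
+        # package names, e.g. (hypothetical values)
+        # {Manifest.PKG_TYPE_MUST_INSTALL: ["packagegroup-core-sdk"],
+        #  Manifest.PKG_TYPE_ATTEMPT_ONLY: ["foo-dbg"]}; attempt-only
+        # packages are installed last and are allowed to fail.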
+
+        pm.create_configs()
+        pm.write_index()
+        pm.dump_all_available_pkgs()
+        pm.update()
+
+        pkgs = []
+        pkgs_attempt = []
+        for pkg_type in pkgs_to_install:
+            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+                pkgs_attempt += pkgs_to_install[pkg_type]
+            else:
+                pkgs += pkgs_to_install[pkg_type]
+
+        pm.install(pkgs)
+
+        pm.install(pkgs_attempt, True)
+
+    def _populate(self):
+        bb.note("Installing TARGET packages")
+        self._populate_sysroot(self.target_pm, self.target_manifest)
+
+        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+
+        self.target_pm.remove_packaging_data()
+
+        bb.note("Installing NATIVESDK packages")
+        self._populate_sysroot(self.host_pm, self.host_manifest)
+
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+
+        self.host_pm.remove_packaging_data()
+
+        # Move host RPM library data
+        native_rpm_state_dir = os.path.join(self.sdk_output,
+                                            self.sdk_native_path,
+                                            self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+                                            "lib",
+                                            "rpm"
+                                            )
+        bb.utils.mkdirhier(native_rpm_state_dir)
+        for f in glob.glob(os.path.join(self.sdk_output,
+                                        "var",
+                                        "lib",
+                                        "rpm",
+                                        "*")):
+            bb.utils.movefile(f, native_rpm_state_dir)
+
+        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
+
+        # Move host sysconfig data
+        native_sysconf_dir = os.path.join(self.sdk_output,
+                                          self.sdk_native_path,
+                                          self.d.getVar('sysconfdir',
+                                                        True).strip('/'),
+                                          )
+        bb.utils.mkdirhier(native_sysconf_dir)
+        for f in glob.glob(os.path.join(self.sdk_output, "etc", "*")):
+            bb.utils.movefile(f, native_sysconf_dir)
+        bb.utils.remove(os.path.join(self.sdk_output, "etc"), True)
+
+
+class OpkgSdk(Sdk):
+    def __init__(self, d, manifest_dir=None):
+        super(OpkgSdk, self).__init__(d, manifest_dir)
+
+        self.target_conf = self.d.getVar("IPKGCONF_TARGET", True)
+        self.host_conf = self.d.getVar("IPKGCONF_SDK", True)
+
+        self.target_manifest = OpkgManifest(d, self.manifest_dir,
+                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
+        self.host_manifest = OpkgManifest(d, self.manifest_dir,
+                                          Manifest.MANIFEST_TYPE_SDK_HOST)
+
+        self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf,
+                                self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS", True))
+
+        self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf,
+                              self.d.getVar("SDK_PACKAGE_ARCHS", True))
+
+    def _populate_sysroot(self, pm, manifest):
+        pkgs_to_install = manifest.parse_initial_manifest()
+
+        if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS', True) or "") != "1":
+            pm.write_index()
+
+        pm.update()
+
+        pkgs = []
+        pkgs_attempt = []
+        for pkg_type in pkgs_to_install:
+            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+                pkgs_attempt += pkgs_to_install[pkg_type]
+            else:
+                pkgs += pkgs_to_install[pkg_type]
+
+        pm.install(pkgs)
+
+        pm.install(pkgs_attempt, True)
+
+    def _populate(self):
+        bb.note("Installing TARGET packages")
+        self._populate_sysroot(self.target_pm, self.target_manifest)
+
+        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+
+        bb.note("Installing NATIVESDK packages")
+        self._populate_sysroot(self.host_pm, self.host_manifest)
+
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+
+        target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir)
+        host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir)
+
+        bb.utils.mkdirhier(target_sysconfdir)
+        shutil.copy(self.target_conf, target_sysconfdir)
+        os.chmod(os.path.join(target_sysconfdir,
+                              os.path.basename(self.target_conf)), 0644)
+
+        bb.utils.mkdirhier(host_sysconfdir)
+        shutil.copy(self.host_conf, host_sysconfdir)
+        os.chmod(os.path.join(host_sysconfdir,
+                              os.path.basename(self.host_conf)), 0644)
+
+        native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+                                             self.d.getVar('localstatedir_nativesdk', True).strip('/'),
+                                             "lib", "opkg")
+        bb.utils.mkdirhier(native_opkg_state_dir)
+        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")):
+            bb.utils.movefile(f, native_opkg_state_dir)
+
+        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
+
+
+class DpkgSdk(Sdk):
+    def __init__(self, d, manifest_dir=None):
+        super(DpkgSdk, self).__init__(d, manifest_dir)
+
+        self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt")
+        self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET", True), "apt-sdk")
+
+        self.target_manifest = DpkgManifest(d, self.manifest_dir,
+                                            Manifest.MANIFEST_TYPE_SDK_TARGET)
+        self.host_manifest = DpkgManifest(d, self.manifest_dir,
+                                          Manifest.MANIFEST_TYPE_SDK_HOST)
+
+        self.target_pm = DpkgPM(d, self.sdk_target_sysroot,
+                                self.d.getVar("PACKAGE_ARCHS", True),
+                                self.d.getVar("DPKG_ARCH", True),
+                                self.target_conf_dir)
+
+        self.host_pm = DpkgPM(d, self.sdk_host_sysroot,
+                              self.d.getVar("SDK_PACKAGE_ARCHS", True),
+                              self.d.getVar("DEB_SDK_ARCH", True),
+                              self.host_conf_dir)
+
+    def _copy_apt_dir_to(self, dst_dir):
+        staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE", True)
+
+        bb.utils.remove(dst_dir, True)
+
+        shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir)
+
+    def _populate_sysroot(self, pm, manifest):
+        pkgs_to_install = manifest.parse_initial_manifest()
+
+        pm.write_index()
+        pm.update()
+
+        pkgs = []
+        pkgs_attempt = []
+        for pkg_type in pkgs_to_install:
+            if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY:
+                pkgs_attempt += pkgs_to_install[pkg_type]
+            else:
+                pkgs += pkgs_to_install[pkg_type]
+
+        pm.install(pkgs)
+
+        pm.install(pkgs_attempt, True)
+
+    def _populate(self):
+        bb.note("Installing TARGET packages")
+        self._populate_sysroot(self.target_pm, self.target_manifest)
+
+        self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY', True))
+
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND", True))
+
+        self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt"))
+
+        bb.note("Installing NATIVESDK packages")
+        self._populate_sysroot(self.host_pm, self.host_manifest)
+
+        execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND", True))
+
+        self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path,
+                                           "etc", "apt"))
+
+        native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path,
+                                             "var", "lib", "dpkg")
+        bb.utils.mkdirhier(native_dpkg_state_dir)
+        for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")):
+            bb.utils.movefile(f, native_dpkg_state_dir)
+
+        bb.utils.remove(os.path.join(self.sdk_output, "var"), True)
+
+
+def sdk_list_installed_packages(d, target, format=None, rootfs_dir=None):
+    if rootfs_dir is None:
+        sdk_output = d.getVar('SDK_OUTPUT', True)
+        target_path = d.getVar('SDKTARGETSYSROOT', True).strip('/')
+
+        rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True]
+
+    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    if img_type == "rpm":
+        arch_var = ["SDK_PACKAGE_ARCHS", None][target is True]
+        os_var = ["SDK_OS", None][target is True]
+        return RpmPkgsList(d, rootfs_dir, arch_var, os_var).list(format)
+    elif img_type == "ipk":
+        conf_file_var = ["IPKGCONF_SDK", "IPKGCONF_TARGET"][target is True]
+        return OpkgPkgsList(d, rootfs_dir, d.getVar(conf_file_var, True)).list(format)
+    elif img_type == "deb":
+        return DpkgPkgsList(d, rootfs_dir).list(format)
+
+def populate_sdk(d, manifest_dir=None):
+    env_bkp = os.environ.copy()
+
+    img_type = d.getVar('IMAGE_PKGTYPE', True)
+    if img_type == "rpm":
+        RpmSdk(d, manifest_dir).populate()
+    elif img_type == "ipk":
+        OpkgSdk(d, manifest_dir).populate()
+    elif img_type == "deb":
+        DpkgSdk(d, manifest_dir).populate()
+
+    os.environ.clear()
+    os.environ.update(env_bkp)
+
+if __name__ == "__main__":
+    pass
diff --git a/meta/lib/oe/sstatesig.py b/meta/lib/oe/sstatesig.py
new file mode 100644
index 0000000..cb46712
--- /dev/null
+++ b/meta/lib/oe/sstatesig.py
@@ -0,0 +1,291 @@
+import bb.siggen
+
+def sstate_rundepfilter(siggen, fn, recipename, task, dep, depname, dataCache):
+    # Return True if we should keep the dependency, False to drop it
+    def isNative(x):
+        return x.endswith("-native")
+    def isCross(x):
+        return "-cross-" in x
+    def isNativeSDK(x):
+        return x.startswith("nativesdk-")
+    def isKernel(fn):
+        inherits = " ".join(dataCache.inherits[fn])
+        return inherits.find("/module-base.bbclass") != -1 or inherits.find("/linux-kernel-base.bbclass") != -1
+    def isPackageGroup(fn):
+        inherits = " ".join(dataCache.inherits[fn])
+        return "/packagegroup.bbclass" in inherits
+    def isAllArch(fn):
+        inherits = " ".join(dataCache.inherits[fn])
+        return "/allarch.bbclass" in inherits
+    def isImage(fn):
+        return "/image.bbclass" in " ".join(dataCache.inherits[fn])
+
+    # Always include our own inter-task dependencies
+    if recipename == depname:
+        return True
+
+    # Quilt (patch application) changing isn't likely to affect anything
+    excludelist = ['quilt-native', 'subversion-native', 'git-native']
+    if depname in excludelist and recipename != depname:
+        return False
+
+    # Exclude well defined recipe->dependency
+    if "%s->%s" % (recipename, depname) in siggen.saferecipedeps:
+        return False
+
+    # Don't change native/cross/nativesdk recipe dependencies any further
+    if isNative(recipename) or isCross(recipename) or isNativeSDK(recipename):
+        return True
+
+    # Only target packages beyond here
+
+    # allarch packagegroups are assumed to have well behaved names which don't change between architectures/tunes
+    if isPackageGroup(fn) and isAllArch(fn):
+        return False
+
+    # Exclude well defined machine specific configurations which don't change ABI
+    if depname in siggen.abisaferecipes and not isImage(fn):
+        return False
+
+    # Kernel modules are well namespaced. We don't want to depend on the kernel's checksum
+    # if we're just doing an RRECOMMENDS_xxx = "kernel-module-*", not least because the checksum
+    # is machine specific.
+    # Therefore if we're not a kernel or a module recipe (inheriting the kernel classes)
+    # and we recommend a kernel-module, we exclude the dependency.
+    depfn = dep.rsplit(".", 1)[0]
+    if dataCache and isKernel(depfn) and not isKernel(fn):
+        for pkg in dataCache.runrecs[fn]:
+            if " ".join(dataCache.runrecs[fn][pkg]).find("kernel-module-") != -1:
+                return False
+
+    # Default to keep dependencies
+    return True
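+
+# A sketch of the filter's effect with hypothetical names: a target recipe
+# "busybox" depending on "quilt-native" has that dependency dropped
+# (quilt-native is in the excludelist above), so a quilt-native change alone
+# does not alter busybox's signatures; a dependency such as "glibc" is kept,
+# assuming glibc is not listed in SIGGEN_EXCLUDERECIPES_ABISAFE.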
+
+def sstate_lockedsigs(d):
+    sigs = {}
+    types = (d.getVar("SIGGEN_LOCKEDSIGS_TYPES", True) or "").split()
+    for t in types:
+        lockedsigs = (d.getVar("SIGGEN_LOCKEDSIGS_%s" % t, True) or "").split()
+        for ls in lockedsigs:
+            pn, task, h = ls.split(":", 2)
+            if pn not in sigs:
+                sigs[pn] = {}
+            sigs[pn][task] = h
+    return sigs
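+
+# Input/output sketch (hypothetical values): with
+#   SIGGEN_LOCKEDSIGS_TYPES = "t-core2-64"
+#   SIGGEN_LOCKEDSIGS_t-core2-64 = "zlib:do_compile:0123abcd"
+# sstate_lockedsigs() returns {'zlib': {'do_compile': '0123abcd'}}.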
+
+class SignatureGeneratorOEBasic(bb.siggen.SignatureGeneratorBasic):
+    name = "OEBasic"
+    def init_rundepcheck(self, data):
+        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
+        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+
+class SignatureGeneratorOEBasicHash(bb.siggen.SignatureGeneratorBasicHash):
+    name = "OEBasicHash"
+    def init_rundepcheck(self, data):
+        self.abisaferecipes = (data.getVar("SIGGEN_EXCLUDERECIPES_ABISAFE", True) or "").split()
+        self.saferecipedeps = (data.getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", True) or "").split()
+        self.lockedsigs = sstate_lockedsigs(data)
+        self.lockedhashes = {}
+        self.lockedpnmap = {}
+        self.lockedhashfn = {}
+        self.machine = data.getVar("MACHINE", True)
+        self.mismatch_msgs = []
+    def rundep_check(self, fn, recipename, task, dep, depname, dataCache = None):
+        return sstate_rundepfilter(self, fn, recipename, task, dep, depname, dataCache)
+
+    def get_taskdata(self):
+        data = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskdata()
+        return (data, self.lockedpnmap, self.lockedhashfn)
+
+    def set_taskdata(self, data):
+        coredata, self.lockedpnmap, self.lockedhashfn = data
+        super(bb.siggen.SignatureGeneratorBasicHash, self).set_taskdata(coredata)
+
+    def dump_sigs(self, dataCache, options):
+        self.dump_lockedsigs()
+        return super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigs(dataCache, options)
+
+    def get_taskhash(self, fn, task, deps, dataCache):
+        h = super(bb.siggen.SignatureGeneratorBasicHash, self).get_taskhash(fn, task, deps, dataCache)
+
+        recipename = dataCache.pkg_fn[fn]
+        self.lockedpnmap[fn] = recipename
+        self.lockedhashfn[fn] = dataCache.hashfn[fn]
+        if recipename in self.lockedsigs:
+            if task in self.lockedsigs[recipename]:
+                k = fn + "." + task
+                h_locked = self.lockedsigs[recipename][task]
+                self.lockedhashes[k] = h_locked
+                self.taskhash[k] = h_locked
+                #bb.warn("Using %s %s %s" % (recipename, task, h))
+
+                if h != h_locked:
+                    self.mismatch_msgs.append('The %s:%s sig (%s) changed, using locked sig %s instead'
+                                              % (recipename, task, h, h_locked))
+
+                return h_locked
+        #bb.warn("%s %s %s" % (recipename, task, h))
+        return h
+
+    def dump_sigtask(self, fn, task, stampbase, runtime):
+        k = fn + "." + task
+        if k in self.lockedhashes:
+            return
+        super(bb.siggen.SignatureGeneratorBasicHash, self).dump_sigtask(fn, task, stampbase, runtime)
+
+    def dump_lockedsigs(self, sigfile=None, taskfilter=None):
+        if not sigfile:
+            sigfile = os.getcwd() + "/locked-sigs.inc"
+
+        bb.plain("Writing locked sigs to %s" % sigfile)
+        types = {}
+        for k in self.runtaskdeps:
+            if taskfilter:
+                if k not in taskfilter:
+                    continue
+            fn = k.rsplit(".",1)[0]
+            t = self.lockedhashfn[fn].split(" ")[1].split(":")[5]
+            t = 't-' + t.replace('_', '-')
+            if t not in types:
+                types[t] = []
+            types[t].append(k)
+
+        with open(sigfile, "w") as f:
+            for t in types:
+                f.write('SIGGEN_LOCKEDSIGS_%s = "\\\n' % t)
+                types[t].sort()
+                sortedk = sorted(types[t], key=lambda k: self.lockedpnmap[k.rsplit(".",1)[0]])
+                for k in sortedk:
+                    fn = k.rsplit(".",1)[0]
+                    task = k.rsplit(".",1)[1]
+                    if k not in self.taskhash:
+                        continue
+                    f.write("    " + self.lockedpnmap[fn] + ":" + task + ":" + self.taskhash[k] + " \\\n")
+                f.write('    "\n')
+            f.write('SIGGEN_LOCKEDSIGS_TYPES_%s = "%s"' % (self.machine, " ".join(types.keys())))
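+
+    # The generated file is BitBake metadata itself; a hypothetical excerpt:
+    #   SIGGEN_LOCKEDSIGS_t-core2-64 = "\
+    #       zlib:do_compile:0123abcd \
+    #       "
+    #   SIGGEN_LOCKEDSIGS_TYPES_qemux86-64 = "t-core2-64"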
+
+    def checkhashes(self, missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d):
+        checklevel = d.getVar("SIGGEN_LOCKEDSIGS_CHECK_LEVEL", True)
+        for task in range(len(sq_fn)):
+            if task not in ret:
+                for pn in self.lockedsigs:
+                    if sq_hash[task] in self.lockedsigs[pn].itervalues():
+                        self.mismatch_msgs.append("Locked sig is set for %s:%s (%s) yet not in sstate cache?"
+                                               % (pn, sq_task[task], sq_hash[task]))
+
+        if self.mismatch_msgs and checklevel == 'warn':
+            bb.warn("\n".join(self.mismatch_msgs))
+        elif self.mismatch_msgs and checklevel == 'error':
+            bb.fatal("\n".join(self.mismatch_msgs))
+
+
+# Insert these classes into siggen's namespace so it can see and select them
+bb.siggen.SignatureGeneratorOEBasic = SignatureGeneratorOEBasic
+bb.siggen.SignatureGeneratorOEBasicHash = SignatureGeneratorOEBasicHash
+
+
+def find_siginfo(pn, taskname, taskhashlist, d):
+    """ Find signature data files for comparison purposes """
+
+    import fnmatch
+    import glob
+
+    if taskhashlist:
+        hashfiles = {}
+
+    if not taskname:
+        # We have to derive pn and taskname
+        key = pn
+        splitit = key.split('.bb.')
+        taskname = splitit[1]
+        pn = os.path.basename(splitit[0]).split('_')[0]
+        if key.startswith('virtual:native:'):
+            pn = pn + '-native'
+
+    filedates = {}
+
+    # First search in stamps dir
+    localdata = d.createCopy()
+    localdata.setVar('MULTIMACH_TARGET_SYS', '*')
+    localdata.setVar('PN', pn)
+    localdata.setVar('PV', '*')
+    localdata.setVar('PR', '*')
+    localdata.setVar('EXTENDPE', '')
+    stamp = localdata.getVar('STAMP', True)
+    filespec = '%s.%s.sigdata.*' % (stamp, taskname)
+    foundall = False
+    for fullpath in glob.glob(filespec):
+        match = False
+        if taskhashlist:
+            for taskhash in taskhashlist:
+                if fullpath.endswith('.%s' % taskhash):
+                    hashfiles[taskhash] = fullpath
+                    if len(hashfiles) == len(taskhashlist):
+                        foundall = True
+                        break
+        else:
+            try:
+                filedates[fullpath] = os.stat(fullpath).st_mtime
+            except OSError:
+                continue
+
+    if not taskhashlist or (len(filedates) < 2 and not foundall):
+        # That didn't work, look in sstate-cache
+        hashes = taskhashlist or ['*']
+        localdata = bb.data.createCopy(d)
+        for hashval in hashes:
+            localdata.setVar('PACKAGE_ARCH', '*')
+            localdata.setVar('TARGET_VENDOR', '*')
+            localdata.setVar('TARGET_OS', '*')
+            localdata.setVar('PN', pn)
+            localdata.setVar('PV', '*')
+            localdata.setVar('PR', '*')
+            localdata.setVar('BB_TASKHASH', hashval)
+            swspec = localdata.getVar('SSTATE_SWSPEC', True)
+            if taskname in ['do_fetch', 'do_unpack', 'do_patch', 'do_populate_lic', 'do_preconfigure'] and swspec:
+                localdata.setVar('SSTATE_PKGSPEC', '${SSTATE_SWSPEC}')
+            elif pn.endswith('-native') or "-cross-" in pn or "-crosssdk-" in pn:
+                localdata.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
+            sstatename = taskname[3:]
+            filespec = '%s_%s.*.siginfo' % (localdata.getVar('SSTATE_PKG', True), sstatename)
+
+            if hashval != '*':
+                sstatedir = "%s/%s" % (d.getVar('SSTATE_DIR', True), hashval[:2])
+            else:
+                sstatedir = d.getVar('SSTATE_DIR', True)
+
+            for root, dirs, files in os.walk(sstatedir):
+                for fn in files:
+                    fullpath = os.path.join(root, fn)
+                    if fnmatch.fnmatch(fullpath, filespec):
+                        if taskhashlist:
+                            hashfiles[hashval] = fullpath
+                        else:
+                            try:
+                                filedates[fullpath] = os.stat(fullpath).st_mtime
+                            except OSError:
+                                continue
+
+    if taskhashlist:
+        return hashfiles
+    else:
+        return filedates
+
+bb.siggen.find_siginfo = find_siginfo
+
+
+def sstate_get_manifest_filename(task, d):
+    """
+    Return the sstate manifest file path for a particular task.
+    Also returns the datastore that can be used to query related variables.
+    """
+    d2 = d.createCopy()
+    extrainf = d.getVarFlag("do_" + task, 'stamp-extra-info', True)
+    if extrainf:
+        d2.setVar("SSTATE_MANMACH", extrainf)
+    return (d2.expand("${SSTATE_MANFILEPREFIX}.%s" % task), d2)
diff --git a/meta/lib/oe/terminal.py b/meta/lib/oe/terminal.py
new file mode 100644
index 0000000..52a8913
--- /dev/null
+++ b/meta/lib/oe/terminal.py
@@ -0,0 +1,263 @@
+import logging
+import oe.classutils
+import shlex
+from bb.process import Popen, ExecutionError
+from distutils.version import LooseVersion
+
+logger = logging.getLogger('BitBake.OE.Terminal')
+
+
+class UnsupportedTerminal(Exception):
+    pass
+
+class NoSupportedTerminals(Exception):
+    pass
+
+
+class Registry(oe.classutils.ClassRegistry):
+    command = None
+
+    def __init__(cls, name, bases, attrs):
+        super(Registry, cls).__init__(name.lower(), bases, attrs)
+
+    @property
+    def implemented(cls):
+        return bool(cls.command)
+
+
+class Terminal(Popen):
+    __metaclass__ = Registry
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        fmt_sh_cmd = self.format_command(sh_cmd, title)
+        try:
+            Popen.__init__(self, fmt_sh_cmd, env=env)
+        except OSError as exc:
+            import errno
+            if exc.errno == errno.ENOENT:
+                raise UnsupportedTerminal(self.name)
+            else:
+                raise
+
+    def format_command(self, sh_cmd, title):
+        fmt = {'title': title or 'Terminal', 'command': sh_cmd}
+        if isinstance(self.command, basestring):
+            return shlex.split(self.command.format(**fmt))
+        else:
+            return [element.format(**fmt) for element in self.command]
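+
+    # For example (hypothetical values), with command = 'xterm -T "{title}" -e
+    # {command}', format_command("bash", "devshell") returns the argv list
+    # ['xterm', '-T', 'devshell', '-e', 'bash'].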
+
+class XTerminal(Terminal):
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        Terminal.__init__(self, sh_cmd, title, env, d)
+        if not os.environ.get('DISPLAY'):
+            raise UnsupportedTerminal(self.name)
+
+class Gnome(XTerminal):
+    command = 'gnome-terminal -t "{title}" --disable-factory -x {command}'
+    priority = 2
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        # Recent versions of gnome-terminal do not support non-UTF8 charsets:
+        # https://bugzilla.gnome.org/show_bug.cgi?id=732127; as a workaround,
+        # we clear the LC_ALL environment variable so the default locale is
+        # used. Once this is fixed in the gnome-terminal project, this
+        # workaround should be removed.
+        if os.getenv('LC_ALL'): os.putenv('LC_ALL','')
+
+        # Check version
+        vernum = check_terminal_version("gnome-terminal")
+        if vernum and LooseVersion(vernum) >= '3.10':
+            logger.debug(1, 'Gnome-Terminal 3.10 or later does not support --disable-factory')
+            self.command = 'gnome-terminal -t "{title}" -x {command}'
+        XTerminal.__init__(self, sh_cmd, title, env, d)
+
+class Mate(XTerminal):
+    command = 'mate-terminal -t "{title}" -x {command}'
+    priority = 2
+
+class Xfce(XTerminal):
+    command = 'xfce4-terminal -T "{title}" -e "{command}"'
+    priority = 2
+
+class Terminology(XTerminal):
+    command = 'terminology -T="{title}" -e {command}'
+    priority = 2
+
+class Konsole(XTerminal):
+    command = 'konsole --nofork -p tabtitle="{title}" -e {command}'
+    priority = 2
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        # Check version
+        vernum = check_terminal_version("konsole")
+        if vernum and LooseVersion(vernum) < '2.0.0':
+            # Konsole from KDE 3.x
+            self.command = 'konsole -T "{title}" -e {command}'
+        XTerminal.__init__(self, sh_cmd, title, env, d)
+
+class XTerm(XTerminal):
+    command = 'xterm -T "{title}" -e {command}'
+    priority = 1
+
+class Rxvt(XTerminal):
+    command = 'rxvt -T "{title}" -e {command}'
+    priority = 1
+
+class Screen(Terminal):
+    command = 'screen -D -m -t "{title}" -S devshell {command}'
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        s_id = "devshell_%i" % os.getpid()
+        self.command = "screen -D -m -t \"{title}\" -S %s {command}" % s_id
+        Terminal.__init__(self, sh_cmd, title, env, d)
+        msg = 'Screen started. Please connect in another terminal with ' \
+            '"screen -r %s"' % s_id
+        if (d):
+            bb.event.fire(bb.event.LogExecTTY(msg, "screen -r %s" % s_id,
+                                              0.5, 10), d)
+        else:
+            logger.warn(msg)
+
+class TmuxRunning(Terminal):
+    """Open a new pane in the current running tmux window"""
+    name = 'tmux-running'
+    command = 'tmux split-window "{command}"'
+    priority = 2.75
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+            raise UnsupportedTerminal('tmux is not installed')
+
+        if not os.getenv('TMUX'):
+            raise UnsupportedTerminal('tmux is not running')
+
+        if not check_tmux_pane_size('tmux'):
+            raise UnsupportedTerminal('tmux pane too small')
+
+        Terminal.__init__(self, sh_cmd, title, env, d)
+
+class TmuxNewWindow(Terminal):
+    """Open a new window in the current running tmux session"""
+    name = 'tmux-new-window'
+    command = 'tmux new-window -n "{title}" "{command}"'
+    priority = 2.70
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+            raise UnsupportedTerminal('tmux is not installed')
+
+        if not os.getenv('TMUX'):
+            raise UnsupportedTerminal('tmux is not running')
+
+        Terminal.__init__(self, sh_cmd, title, env, d)
+
+class Tmux(Terminal):
+    """Start a new tmux session and window"""
+    command = 'tmux new -d -s devshell -n devshell "{command}"'
+    priority = 0.75
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        if not bb.utils.which(os.getenv('PATH'), 'tmux'):
+            raise UnsupportedTerminal('tmux is not installed')
+
+        # TODO: consider using a 'devshell' session shared amongst all
+        # devshells, if it's already there, add a new window to it.
+        window_name = 'devshell-%i' % os.getpid()
+
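+        # The doubled braces in "{{command}}" survive str.format() as the
+        # literal "{command}" placeholder, which format_command() fills in
+        # later; only {0} is replaced with the window name here.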
+        self.command = 'tmux new -d -s {0} -n {0} "{{command}}"'.format(window_name)
+        Terminal.__init__(self, sh_cmd, title, env, d)
+
+        attach_cmd = 'tmux att -t {0}'.format(window_name)
+        msg = 'Tmux started. Please connect in another terminal with `tmux att -t {0}`'.format(window_name)
+        if d:
+            bb.event.fire(bb.event.LogExecTTY(msg, attach_cmd, 0.5, 10), d)
+        else:
+            logger.warn(msg)
+
+class Custom(Terminal):
+    command = 'false' # This is a placeholder
+    priority = 3
+
+    def __init__(self, sh_cmd, title=None, env=None, d=None):
+        self.command = d and d.getVar('OE_TERMINAL_CUSTOMCMD', True)
+        if self.command:
+            if '{command}' not in self.command:
+                self.command += ' {command}'
+            Terminal.__init__(self, sh_cmd, title, env, d)
+            logger.warn('Custom terminal was started.')
+        else:
+            logger.debug(1, 'No custom terminal (OE_TERMINAL_CUSTOMCMD) set')
+            raise UnsupportedTerminal('OE_TERMINAL_CUSTOMCMD not set')
+
+
+def prioritized():
+    return Registry.prioritized()
+
+def spawn_preferred(sh_cmd, title=None, env=None, d=None):
+    """Spawn the first supported terminal, by priority"""
+    for terminal in prioritized():
+        try:
+            spawn(terminal.name, sh_cmd, title, env, d)
+            break
+        except UnsupportedTerminal:
+            continue
+    else:
+        raise NoSupportedTerminals()
+
+def spawn(name, sh_cmd, title=None, env=None, d=None):
+    """Spawn the specified terminal, by name"""
+    logger.debug(1, 'Attempting to spawn terminal "%s"', name)
+    try:
+        terminal = Registry.registry[name]
+    except KeyError:
+        raise UnsupportedTerminal(name)
+
+    pipe = terminal(sh_cmd, title, env, d)
+    output = pipe.communicate()[0]
+    if pipe.returncode != 0:
+        raise ExecutionError(sh_cmd, pipe.returncode, output)
+
+def check_tmux_pane_size(tmux):
+    import subprocess as sub
+    try:
+        p = sub.Popen('%s list-panes -F "#{?pane_active,#{pane_height},}"' % tmux,
+                shell=True,stdout=sub.PIPE,stderr=sub.PIPE)
+        out, err = p.communicate()
+        size = int(out.strip())
+    except OSError as exc:
+        import errno
+        if exc.errno == errno.ENOENT:
+            return None
+        else:
+            raise
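+    # tmux split-window halves the active pane, so only report success when
+    # the resulting pane would still have a usable height (19 lines here).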
+    return size/2 >= 19
+
+def check_terminal_version(terminalName):
+    import subprocess as sub
+    try:
+        p = sub.Popen(['sh', '-c', '%s --version' % terminalName],stdout=sub.PIPE,stderr=sub.PIPE)
+        out, err = p.communicate()
+        ver_info = out.rstrip().split('\n')
+    except OSError as exc:
+        import errno
+        if exc.errno == errno.ENOENT:
+            return None
+        else:
+            raise
+    vernum = None
+    for ver in ver_info:
+        if ver.startswith('Konsole'):
+            vernum = ver.split(' ')[-1]
+        if ver.startswith('GNOME Terminal'):
+            vernum = ver.split(' ')[-1]
+    return vernum
+
+def distro_name():
+    try:
+        p = Popen(['lsb_release', '-i'])
+        out, err = p.communicate()
+        distro = out.split(':')[1].strip().lower()
+    except Exception:
+        distro = "unknown"
+    return distro
diff --git a/meta/lib/oe/tests/__init__.py b/meta/lib/oe/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/meta/lib/oe/tests/__init__.py
diff --git a/meta/lib/oe/tests/test_license.py b/meta/lib/oe/tests/test_license.py
new file mode 100644
index 0000000..c388886
--- /dev/null
+++ b/meta/lib/oe/tests/test_license.py
@@ -0,0 +1,68 @@
+import unittest
+import oe.license
+
+class SeenVisitor(oe.license.LicenseVisitor):
+    def __init__(self):
+        self.seen = []
+        oe.license.LicenseVisitor.__init__(self)
+
+    def visit_Str(self, node):
+        self.seen.append(node.s)
+
+class TestSingleLicense(unittest.TestCase):
+    licenses = [
+        "GPLv2",
+        "LGPL-2.0",
+        "Artistic",
+        "MIT",
+        "GPLv3+",
+        "FOO_BAR",
+    ]
+    invalid_licenses = ["GPL/BSD"]
+
+    @staticmethod
+    def parse(licensestr):
+        visitor = SeenVisitor()
+        visitor.visit_string(licensestr)
+        return visitor.seen
+
+    def test_single_licenses(self):
+        for license in self.licenses:
+            licenses = self.parse(license)
+            self.assertListEqual(licenses, [license])
+
+    def test_invalid_licenses(self):
+        for license in self.invalid_licenses:
+            with self.assertRaises(oe.license.InvalidLicense) as cm:
+                self.parse(license)
+            self.assertEqual(cm.exception.license, license)
+
+class TestSimpleCombinations(unittest.TestCase):
+    tests = {
+        "FOO&BAR": ["FOO", "BAR"],
+        "BAZ & MOO": ["BAZ", "MOO"],
+        "ALPHA|BETA": ["ALPHA"],
+        "BAZ&MOO|FOO": ["FOO"],
+        "FOO&BAR|BAZ": ["FOO", "BAR"],
+    }
+    preferred = ["ALPHA", "FOO", "BAR"]
+
+    def test_tests(self):
+        def choose(a, b):
+            if all(lic in self.preferred for lic in b):
+                return b
+            else:
+                return a
+
+        for license, expected in self.tests.items():
+            licenses = oe.license.flattened_licenses(license, choose)
+            self.assertListEqual(licenses, expected)
+
+class TestComplexCombinations(TestSimpleCombinations):
+    tests = {
+        "FOO & (BAR | BAZ)&MOO": ["FOO", "BAR", "MOO"],
+        "(ALPHA|(BETA&THETA)|OMEGA)&DELTA": ["OMEGA", "DELTA"],
+        "((ALPHA|BETA)&FOO)|BAZ": ["BETA", "FOO"],
+        "(GPL-2.0|Proprietary)&BSD-4-clause&MIT": ["GPL-2.0", "BSD-4-clause", "MIT"],
+    }
+    preferred = ["BAR", "OMEGA", "BETA", "GPL-2.0"]
diff --git a/meta/lib/oe/tests/test_path.py b/meta/lib/oe/tests/test_path.py
new file mode 100644
index 0000000..3d41ce1
--- /dev/null
+++ b/meta/lib/oe/tests/test_path.py
@@ -0,0 +1,89 @@
+import unittest
+import oe, oe.path
+import tempfile
+import os
+import errno
+import shutil
+
+class TestRealPath(unittest.TestCase):
+    DIRS = [ "a", "b", "etc", "sbin", "usr", "usr/bin", "usr/binX", "usr/sbin", "usr/include", "usr/include/gdbm" ]
+    FILES = [ "etc/passwd", "b/file" ]
+    LINKS = [
+        ( "bin",             "/usr/bin",             "/usr/bin" ),
+        ( "binX",            "usr/binX",             "/usr/binX" ),
+        ( "c",               "broken",               "/broken" ),
+        ( "etc/passwd-1",    "passwd",               "/etc/passwd" ),
+        ( "etc/passwd-2",    "passwd-1",             "/etc/passwd" ),
+        ( "etc/passwd-3",    "/etc/passwd-1",        "/etc/passwd" ),
+        ( "etc/shadow-1",    "/etc/shadow",          "/etc/shadow" ),
+        ( "etc/shadow-2",    "/etc/shadow-1",        "/etc/shadow" ),
+        ( "prog-A",          "bin/prog-A",           "/usr/bin/prog-A" ),
+        ( "prog-B",          "/bin/prog-B",          "/usr/bin/prog-B" ),
+        ( "usr/bin/prog-C",  "../../sbin/prog-C",    "/sbin/prog-C" ),
+        ( "usr/bin/prog-D",  "/sbin/prog-D",         "/sbin/prog-D" ),
+        ( "usr/binX/prog-E", "../sbin/prog-E",       None ),
+        ( "usr/bin/prog-F",  "../../../sbin/prog-F", "/sbin/prog-F" ),
+        ( "loop",            "a/loop",               None ),
+        ( "a/loop",          "../loop",              None ),
+        ( "b/test",          "file/foo",             "/b/file/foo" ),
+    ]
+
+    LINKS_PHYS = [
+        ( "./",          "/",                "" ),
+        ( "binX/prog-E", "/usr/sbin/prog-E", "/sbin/prog-E" ),
+    ]
+
+    EXCEPTIONS = [
+        ( "loop",   errno.ELOOP ),
+        ( "b/test", errno.ENOENT ),
+    ]
+
+    def __del__(self):
+        try:
+            #os.system("tree -F %s" % self.tmpdir)
+            shutil.rmtree(self.tmpdir)
+        except:
+            pass
+
+    def setUp(self):
+        self.tmpdir = tempfile.mkdtemp(prefix = "oe-test_path")
+        self.root = os.path.join(self.tmpdir, "R")
+
+        os.mkdir(os.path.join(self.tmpdir, "_real"))
+        os.symlink("_real", self.root)
+
+        for d in self.DIRS:
+            os.mkdir(os.path.join(self.root, d))
+        for f in self.FILES:
+            open(os.path.join(self.root, f), "w").close()
+        for l in self.LINKS:
+            os.symlink(l[1], os.path.join(self.root, l[0]))
+
+    def __realpath(self, file, use_physdir, assume_dir = True):
+        return oe.path.realpath(os.path.join(self.root, file), self.root,
+                                use_physdir, assume_dir = assume_dir)
+
+    def test_norm(self):
+        for l in self.LINKS:
+            if l[2] is None:
+                continue
+
+            target_p = self.__realpath(l[0], True)
+            target_l = self.__realpath(l[0], False)
+
+            if l[2] is not False:
+                self.assertEqual(target_p, target_l)
+                self.assertEqual(l[2], target_p[len(self.root):])
+
+    def test_phys(self):
+        for l in self.LINKS_PHYS:
+            target_p = self.__realpath(l[0], True)
+            target_l = self.__realpath(l[0], False)
+
+            self.assertEqual(l[1], target_p[len(self.root):])
+            self.assertEqual(l[2], target_l[len(self.root):])
+
+    def test_loop(self):
+        for e in self.EXCEPTIONS:
+            self.assertRaisesRegexp(OSError, r'\[Errno %u\]' % e[1],
+                                    self.__realpath, e[0], False, False)
diff --git a/meta/lib/oe/tests/test_types.py b/meta/lib/oe/tests/test_types.py
new file mode 100644
index 0000000..367cc30
--- /dev/null
+++ b/meta/lib/oe/tests/test_types.py
@@ -0,0 +1,62 @@
+import unittest
+from oe.maketype import create, factory
+
+class TestTypes(unittest.TestCase):
+    def assertIsInstance(self, obj, cls):
+        return self.assertTrue(isinstance(obj, cls))
+
+    def assertIsNot(self, obj, other):
+        return self.assertFalse(obj is other)
+
+    def assertFactoryCreated(self, value, type, **flags):
+        cls = factory(type)
+        self.assertIsNot(cls, None)
+        self.assertIsInstance(create(value, type, **flags), cls)
+
+class TestBooleanType(TestTypes):
+    def test_invalid(self):
+        self.assertRaises(ValueError, create, '', 'boolean')
+        self.assertRaises(ValueError, create, 'foo', 'boolean')
+        self.assertRaises(TypeError, create, object(), 'boolean')
+
+    def test_true(self):
+        self.assertTrue(create('y', 'boolean'))
+        self.assertTrue(create('yes', 'boolean'))
+        self.assertTrue(create('1', 'boolean'))
+        self.assertTrue(create('t', 'boolean'))
+        self.assertTrue(create('true', 'boolean'))
+        self.assertTrue(create('TRUE', 'boolean'))
+        self.assertTrue(create('truE', 'boolean'))
+
+    def test_false(self):
+        self.assertFalse(create('n', 'boolean'))
+        self.assertFalse(create('no', 'boolean'))
+        self.assertFalse(create('0', 'boolean'))
+        self.assertFalse(create('f', 'boolean'))
+        self.assertFalse(create('false', 'boolean'))
+        self.assertFalse(create('FALSE', 'boolean'))
+        self.assertFalse(create('faLse', 'boolean'))
+
+    def test_bool_equality(self):
+        self.assertEqual(create('n', 'boolean'), False)
+        self.assertNotEqual(create('n', 'boolean'), True)
+        self.assertEqual(create('y', 'boolean'), True)
+        self.assertNotEqual(create('y', 'boolean'), False)
+
+class TestList(TestTypes):
+    def assertListEqual(self, value, valid, sep=None):
+        obj = create(value, 'list', separator=sep)
+        self.assertEqual(obj, valid)
+        if sep is not None:
+            self.assertEqual(obj.separator, sep)
+        self.assertEqual(str(obj), obj.separator.join(obj))
+
+    def test_list_nosep(self):
+        testlist = ['alpha', 'beta', 'theta']
+        self.assertListEqual('alpha beta theta', testlist)
+        self.assertListEqual('alpha  beta\ttheta', testlist)
+        self.assertListEqual('alpha', ['alpha'])
+
+    def test_list_usersep(self):
+        self.assertListEqual('foo:bar', ['foo', 'bar'], ':')
+        self.assertListEqual('foo:bar:baz', ['foo', 'bar', 'baz'], ':')
diff --git a/meta/lib/oe/tests/test_utils.py b/meta/lib/oe/tests/test_utils.py
new file mode 100644
index 0000000..5d9ac52
--- /dev/null
+++ b/meta/lib/oe/tests/test_utils.py
@@ -0,0 +1,51 @@
+import unittest
+from oe.utils import packages_filter_out_system, trim_version
+
+class TestPackagesFilterOutSystem(unittest.TestCase):
+    def test_filter(self):
+        """
+        Test that oe.utils.packages_filter_out_system works.
+        """
+        try:
+            import bb
+        except ImportError:
+            self.skipTest("Cannot import bb")
+
+        d = bb.data_smart.DataSmart()
+        d.setVar("PN", "foo")
+
+        d.setVar("PACKAGES", "foo foo-doc foo-dev")
+        pkgs = packages_filter_out_system(d)
+        self.assertEqual(pkgs, [])
+
+        d.setVar("PACKAGES", "foo foo-doc foo-data foo-dev")
+        pkgs = packages_filter_out_system(d)
+        self.assertEqual(pkgs, ["foo-data"])
+
+        d.setVar("PACKAGES", "foo foo-locale-en-gb")
+        pkgs = packages_filter_out_system(d)
+        self.assertEqual(pkgs, [])
+
+        d.setVar("PACKAGES", "foo foo-data foo-locale-en-gb")
+        pkgs = packages_filter_out_system(d)
+        self.assertEqual(pkgs, ["foo-data"])
+
+
+class TestTrimVersion(unittest.TestCase):
+    def test_version_exception(self):
+        with self.assertRaises(TypeError):
+            trim_version(None, 2)
+        with self.assertRaises(TypeError):
+            trim_version((1, 2, 3), 2)
+
+    def test_num_exception(self):
+        with self.assertRaises(ValueError):
+            trim_version("1.2.3", 0)
+        with self.assertRaises(ValueError):
+            trim_version("1.2.3", -1)
+
+    def test_valid(self):
+        self.assertEqual(trim_version("1.2.3", 1), "1")
+        self.assertEqual(trim_version("1.2.3", 2), "1.2")
+        self.assertEqual(trim_version("1.2.3", 3), "1.2.3")
+        self.assertEqual(trim_version("1.2.3", 4), "1.2.3")
diff --git a/meta/lib/oe/types.py b/meta/lib/oe/types.py
new file mode 100644
index 0000000..7f47c17
--- /dev/null
+++ b/meta/lib/oe/types.py
@@ -0,0 +1,153 @@
+import errno
+import re
+import os
+
+
+class OEList(list):
+    """OpenEmbedded 'list' type
+
+    Acts as an ordinary list, but is constructed from a string value and an
+    optional separator, and re-joins itself when converted to a string with
+    str().  To use this type, set the variable type flag to 'list'; the
+    'separator' flag may also be specified (defaulting to whitespace)."""
+
+    name = "list"
+
+    def __init__(self, value, separator = None):
+        if value is not None:
+            list.__init__(self, value.split(separator))
+        else:
+            list.__init__(self)
+
+        if separator is None:
+            self.separator = " "
+        else:
+            self.separator = separator
+
+    def __str__(self):
+        return self.separator.join(self)
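+
+# Usage sketch (illustrative):
+#     lst = OEList("a b  c")              # splits on whitespace by default
+#     assert lst == ['a', 'b', 'c']
+#     assert str(lst) == "a b c"          # re-joins with its separator
+#     assert str(OEList("a,b", ",")) == "a,b"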
+
+def choice(value, choices):
+    """OpenEmbedded 'choice' type
+
+    Acts as a multiple-choice setting for the user.  To use this, set the
+    variable type flag to 'choice', and set the 'choices' flag to a space
+    separated list of valid values."""
+    if not isinstance(value, basestring):
+        raise TypeError("choice accepts a string, not '%s'" % type(value))
+
+    value = value.lower()
+    choices = choices.lower()
+    if value not in choices.split():
+        raise ValueError("Invalid choice '%s'.  Valid choices: %s" %
+                         (value, choices))
+    return value
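+
+# Usage sketch (illustrative): with FOO[type] = "choice" and
+# FOO[choices] = "glibc musl", values are validated like this:
+#     choice("GLIBC", "glibc musl")     # -> "glibc" (matching is case-insensitive)
+#     choice("uclibc", "glibc musl")    # raises ValueError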
+
+class NoMatch(object):
+    """Stub python regex pattern object which never matches anything"""
+    def findall(self, string, flags=0):
+        return None
+
+    def finditer(self, string, flags=0):
+        return None
+
+    def match(self, string, flags=0):
+        return None
+
+    def search(self, string, flags=0):
+        return None
+
+    def split(self, string, maxsplit=0):
+        return None
+
+    def sub(self, repl, string, count=0):
+        return None
+
+    def subn(self, repl, string, count=0):
+        return None
+
+# Replace the class with a single shared instance; the 'regex' type hands this
+# never-matching object back for empty or undefined values.
+NoMatch = NoMatch()
+
+def regex(value, regexflags=None):
+    """OpenEmbedded 'regex' type
+
+    Acts as a regular expression, returning the pre-compiled regular
+    expression pattern object.  To use this type, set the variable type flag
+    to 'regex', and optionally set the 'regexflags' flag to a space separated
+    list of flags to control the regular expression matching (e.g.
+    FOO[regexflags] += 'ignorecase').  See the python documentation on the
+    're' module for a list of valid flags."""
+
+    flagval = 0
+    if regexflags:
+        for flag in regexflags.split():
+            flag = flag.upper()
+            try:
+                flagval |= getattr(re, flag)
+            except AttributeError:
+                raise ValueError("Invalid regex flag '%s'" % flag)
+
+    if not value:
+        # Let's ensure that the default behavior for an undefined or empty
+        # variable is to match nothing. If the user explicitly wants to match
+        # anything, they can match '.*' instead.
+        return NoMatch
+
+    try:
+        return re.compile(value, flagval)
+    except re.error as exc:
+        raise ValueError("Invalid regex value '%s': %s" %
+                         (value, exc.args[0]))
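+
+# Usage sketch (illustrative):
+#     pat = regex(r'^foo', 'ignorecase')  # compiled with re.IGNORECASE
+#     assert pat.match('FOObar')
+#     assert regex('') is NoMatch         # empty/unset values match nothing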
+
+def boolean(value):
+    """OpenEmbedded 'boolean' type
+
+    Valid values for true: 'yes', 'y', 'true', 't', '1'
+    Valid values for false: 'no', 'n', 'false', 'f', '0'
+    """
+
+    if not isinstance(value, basestring):
+        raise TypeError("boolean accepts a string, not '%s'" % type(value))
+
+    value = value.lower()
+    if value in ('yes', 'y', 'true', 't', '1'):
+        return True
+    elif value in ('no', 'n', 'false', 'f', '0'):
+        return False
+    raise ValueError("Invalid boolean value '%s'" % value)
+
+def integer(value, numberbase=10):
+    """OpenEmbedded 'integer' type
+
+    Defaults to base 10, but this can be specified using the optional
+    'numberbase' flag."""
+
+    return int(value, int(numberbase))
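+
+# Usage sketch (illustrative): with FOO[type] = "integer" and
+# FOO[numberbase] = "16", a value of "ff" parses as 255:
+#     assert integer("42") == 42
+#     assert integer("ff", 16) == 255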
+
+_float = float
+def float(value, fromhex='false'):
+    """OpenEmbedded floating point type
+
+    To use this type, set the type flag to 'float', and optionally set the
+    'fromhex' flag to a true value (obeying the same rules as for the
+    'boolean' type) if the value is in base 16 rather than base 10."""
+
+    if boolean(fromhex):
+        return _float.fromhex(value)
+    else:
+        return _float(value)
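+
+# Usage sketch (illustrative):
+#     assert float("1.5") == 1.5
+#     assert float("0x1.8p0", fromhex='true') == 1.5   # hex float literal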
+
+def path(value, relativeto='', normalize='true', mustexist='false'):
+    """OpenEmbedded 'path' type
+
+    Joins the value onto the optional 'relativeto' flag, normalizes the
+    result by default (set 'normalize' to a false value to disable), and
+    with 'mustexist' set to a true value raises ValueError if the resulting
+    path cannot be opened."""
+    value = os.path.join(relativeto, value)
+
+    if boolean(normalize):
+        value = os.path.normpath(value)
+
+    if boolean(mustexist):
+        try:
+            open(value, 'r').close()  # probe for existence without leaking the handle
+        except IOError as exc:
+            if exc.errno == errno.ENOENT:
+                raise ValueError("{0}: {1}".format(value, os.strerror(errno.ENOENT)))
+
+    return value
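+
+# Usage sketch (illustrative; the second call assumes /nonexistent is absent):
+#     assert path("b/../c", relativeto="/a") == "/a/c"
+#     path("/nonexistent", mustexist='true')   # raises ValueError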
diff --git a/meta/lib/oe/utils.py b/meta/lib/oe/utils.py
new file mode 100644
index 0000000..cee087f
--- /dev/null
+++ b/meta/lib/oe/utils.py
@@ -0,0 +1,273 @@
+try:
+    # Python 2
+    import commands as cmdstatus
+except ImportError:
+    # Python 3
+    import subprocess as cmdstatus
+
+def read_file(filename):
+    try:
+        f = open(filename, "r")
+    except IOError as reason:
+        return "" # WARNING: can't raise an error now because of the new RDEPENDS handling. This is a bit ugly. :M:
+    else:
+        data = f.read().strip()
+        f.close()
+        return data
+
+def ifelse(condition, iftrue = True, iffalse = False):
+    if condition:
+        return iftrue
+    else:
+        return iffalse
+
+def conditional(variable, checkvalue, truevalue, falsevalue, d):
+    if d.getVar(variable,1) == checkvalue:
+        return truevalue
+    else:
+        return falsevalue
+
+def less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+    if float(d.getVar(variable,1)) <= float(checkvalue):
+        return truevalue
+    else:
+        return falsevalue
+
+def version_less_or_equal(variable, checkvalue, truevalue, falsevalue, d):
+    result = bb.utils.vercmp_string(d.getVar(variable,True), checkvalue)
+    if result <= 0:
+        return truevalue
+    else:
+        return falsevalue
+
+def both_contain(variable1, variable2, checkvalue, d):
+    val1 = d.getVar(variable1, True)
+    val2 = d.getVar(variable2, True)
+    val1 = set(val1.split())
+    val2 = set(val2.split())
+    if isinstance(checkvalue, basestring):
+        checkvalue = set(checkvalue.split())
+    else:
+        checkvalue = set(checkvalue)
+    if checkvalue.issubset(val1) and checkvalue.issubset(val2):
+        return " ".join(checkvalue)
+    else:
+        return ""
+
+def set_intersect(variable1, variable2, d):
+    """
+    Expand both variables, interpret them as lists of strings, and return the
+    intersection as a flattened string.
+
+    For example:
+    s1 = "a b c"
+    s2 = "b c d"
+    s3 = set_intersect(s1, s2)
+    => s3 = "b c"
+    """
+    val1 = set(d.getVar(variable1, True).split())
+    val2 = set(d.getVar(variable2, True).split())
+    return " ".join(val1 & val2)
+
+def prune_suffix(var, suffixes, d):
+    # See if var ends with any of the suffixes listed and
+    # strip it from the end if found (slicing rather than replace(), which
+    # would also remove the substring from the middle of the string)
+    for suffix in suffixes:
+        if var.endswith(suffix):
+            var = var[:-len(suffix)]
+
+    prefix = d.getVar("MLPREFIX", True)
+    if prefix and var.startswith(prefix):
+        var = var[len(prefix):]
+
+    return var
+
+def str_filter(f, str, d):
+    """Return the space-separated words of 'str' that match the regex 'f'"""
+    from re import match
+    return " ".join(filter(lambda x: match(f, x, 0), str.split()))
+
+def str_filter_out(f, str, d):
+    """Return the space-separated words of 'str' that do not match the regex 'f'"""
+    from re import match
+    return " ".join(filter(lambda x: not match(f, x, 0), str.split()))
+
+def param_bool(cfg, field, dflt = None):
+    """Look up <field> in the <cfg> map and convert it to a boolean, falling
+    back to <dflt> when <field> is not present"""
+    value = cfg.get(field, dflt)
+    strvalue = str(value).lower()
+    if strvalue in ('yes', 'y', 'true', 't', '1'):
+        return True
+    elif strvalue in ('no', 'n', 'false', 'f', '0'):
+        return False
+    raise ValueError("invalid value for boolean parameter '%s': '%s'" % (field, value))
+
+def inherits(d, *classes):
+    """Return True if the metadata inherits any of the specified classes"""
+    return any(bb.data.inherits_class(cls, d) for cls in classes)
+
+def features_backfill(var, d):
+    # This construct allows new features to be added to the variable named by
+    # 'var' (e.g. var = "DISTRO_FEATURES") when their absence would disable
+    # existing functionality, without disturbing distributions that have
+    # already set that variable.
+    # Distributions wanting to elide a value in DISTRO_FEATURES_BACKFILL
+    # should add the feature to DISTRO_FEATURES_BACKFILL_CONSIDERED
+    features = (d.getVar(var, True) or "").split()
+    backfill = (d.getVar(var+"_BACKFILL", True) or "").split()
+    considered = (d.getVar(var+"_BACKFILL_CONSIDERED", True) or "").split()
+
+    addfeatures = []
+    for feature in backfill:
+        if feature not in features and feature not in considered:
+            addfeatures.append(feature)
+
+    if addfeatures:
+        d.appendVar(var, " " + " ".join(addfeatures))
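+
+# Usage sketch (illustrative): with DISTRO_FEATURES = "x11",
+# DISTRO_FEATURES_BACKFILL = "pulseaudio sysvinit" and
+# DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit", calling
+# features_backfill("DISTRO_FEATURES", d) appends only "pulseaudio".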
+
+
+def packages_filter_out_system(d):
+    """
+    Return a list of packages from PACKAGES with the "system" packages such as
+    PN-dbg, PN-doc and PN-locale-en-gb removed.
+    """
+    pn = d.getVar('PN', True)
+    blacklist = map(lambda suffix: pn + suffix, ('', '-dbg', '-dev', '-doc', '-locale', '-staticdev'))
+    localepkg = pn + "-locale-"
+    pkgs = []
+
+    for pkg in d.getVar('PACKAGES', True).split():
+        if pkg not in blacklist and localepkg not in pkg:
+            pkgs.append(pkg)
+    return pkgs
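+
+# Usage sketch (illustrative): with PN = "foo" and
+# PACKAGES = "foo foo-doc foo-data foo-locale-en-gb",
+# packages_filter_out_system(d) returns ["foo-data"].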
+
+def getstatusoutput(cmd):
+    return cmdstatus.getstatusoutput(cmd)
+
+
+def trim_version(version, num_parts=2):
+    """
+    Return just the first <num_parts> of <version>, split by periods.  For
+    example, trim_version("1.2.3", 2) will return "1.2".
+    """
+    if type(version) is not str:
+        raise TypeError("Version should be a string")
+    if num_parts < 1:
+        raise ValueError("Cannot split to parts < 1")
+
+    parts = version.split(".")
+    trimmed = ".".join(parts[:num_parts])
+    return trimmed
+
+def cpu_count():
+    import multiprocessing
+    return multiprocessing.cpu_count()
+
+def execute_pre_post_process(d, cmds):
+    if cmds is None:
+        return
+
+    for cmd in cmds.strip().split(';'):
+        cmd = cmd.strip()
+        if cmd != '':
+            bb.note("Executing %s ..." % cmd)
+            bb.build.exec_func(cmd, d)
+
+def multiprocess_exec(commands, function):
+    import signal
+    import multiprocessing
+
+    if not commands:
+        return []
+
+    def init_worker():
+        signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+    nproc = min(multiprocessing.cpu_count(), len(commands))
+    pool = bb.utils.multiprocessingpool(nproc, init_worker)
+    imap = pool.imap(function, commands)
+
+    try:
+        res = list(imap)
+        pool.close()
+        pool.join()
+        results = []
+        for result in res:
+            if result is not None:
+                results.append(result)
+        return results
+
+    except KeyboardInterrupt:
+        pool.terminate()
+        pool.join()
+        raise
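+
+# Usage sketch (illustrative; 'function' must be defined at module level so it
+# can be pickled for dispatch to the worker processes):
+#     def _strip(cmd):
+#         return cmd.strip()
+#     results = multiprocess_exec([" a ", " b "], _strip)   # -> ['a', 'b']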
+
+def squashspaces(string):
+    import re
+    return re.sub(r"\s+", " ", string).strip()
+
+#
+# Python 2.7 doesn't have threaded pools (just multiprocessing)
+# so implement a version here
+#
+
+from Queue import Queue
+from threading import Thread
+
+class ThreadedWorker(Thread):
+    """Thread executing tasks from a given tasks queue"""
+    def __init__(self, tasks, worker_init, worker_end):
+        Thread.__init__(self)
+        self.tasks = tasks
+        self.daemon = True
+
+        self.worker_init = worker_init
+        self.worker_end = worker_end
+
+    def run(self):
+        from Queue import Empty
+
+        if self.worker_init is not None:
+            self.worker_init(self)
+
+        while True:
+            try:
+                func, args, kargs = self.tasks.get(block=False)
+            except Empty:
+                if self.worker_end is not None:
+                    self.worker_end(self)
+                break
+
+            try:
+                func(self, *args, **kargs)
+            except Exception as e:
+                print(e)
+            finally:
+                self.tasks.task_done()
+
+class ThreadedPool:
+    """Pool of threads consuming tasks from a queue"""
+    def __init__(self, num_workers, num_tasks, worker_init=None,
+            worker_end=None):
+        self.tasks = Queue(num_tasks)
+        self.workers = []
+
+        for _ in range(num_workers):
+            worker = ThreadedWorker(self.tasks, worker_init, worker_end)
+            self.workers.append(worker)
+
+    def start(self):
+        for worker in self.workers:
+            worker.start()
+
+    def add_task(self, func, *args, **kargs):
+        """Add a task to the queue"""
+        self.tasks.put((func, args, kargs))
+
+    def wait_completion(self):
+        """Wait for completion of all the tasks in the queue"""
+        self.tasks.join()
+        for worker in self.workers:
+            worker.join()
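+
+# Usage sketch (illustrative): queued functions receive the worker thread as
+# their first argument, matching how ThreadedWorker.run() invokes them:
+#     def task(worker, name):
+#         print(name)
+#     pool = ThreadedPool(num_workers=4, num_tasks=16)
+#     for i in range(16):
+#         pool.add_task(task, "task-%d" % i)
+#     pool.start()
+#     pool.wait_completion()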