Yocto 2.3

Move OpenBMC to Yocto 2.3 (pyro).

Tested: Built and verified Witherspoon and Palmetto images
Change-Id: I50744030e771f4850afc2a93a10d3507e76d36bc
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
Resolves: openbmc/openbmc#2461
diff --git a/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs b/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs
index 8a03580..d375b04 100755
--- a/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs
+++ b/import-layers/yocto-poky/scripts/buildhistory-collect-srcrevs
@@ -101,7 +101,7 @@
                 for name, value in srcrevs.items():
                     orig = orig_srcrevs.get(name, orig_srcrev)
                     if options.reportall or value != orig:
-                        all_srcrevs[curdir].append((pn, name, srcrev))
+                        all_srcrevs[curdir].append((pn, name, value))
 
     for curdir, srcrevs in sorted(all_srcrevs.items()):
         if srcrevs:
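
The one-line fix above replaces a stale variable (srcrev) with the per-name
value actually collected. A minimal usage sketch, assuming the -a/--report-all
flag implied by options.reportall above:

    # From the build directory: print every recorded SRCREV per SCM name,
    # not only those that differ from the recipe defaults
    buildhistory-collect-srcrevs -a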
diff --git a/import-layers/yocto-poky/scripts/buildhistory-diff b/import-layers/yocto-poky/scripts/buildhistory-diff
index d8ca12d..dd9745e 100755
--- a/import-layers/yocto-poky/scripts/buildhistory-diff
+++ b/import-layers/yocto-poky/scripts/buildhistory-diff
@@ -33,6 +33,12 @@
     parser.add_option("-a", "--report-all",
             help = "Report all changes, not just the default significant ones",
             action="store_true", dest="report_all", default=False)
+    parser.add_option("-s", "--signatures",
+            help = "Report list of signatures differing instead of output",
+            action="store_true", dest="sigs", default=False)
+    parser.add_option("-S", "--signatures-with-diff",
+            help = "Report on actual signature differences instead of output (requires signature data to have been generated, either by running the actual tasks or using bitbake -S)",
+            action="store_true", dest="sigsdiff", default=False)
 
     options, args = parser.parse_args(sys.argv)
 
@@ -46,6 +52,11 @@
         sys.exit(1)
 
     if not os.path.exists(options.buildhistory_dir):
+        if options.buildhistory_dir == 'buildhistory/':
+            cwd = os.getcwd()
+            if os.path.basename(cwd) == 'buildhistory':
+                options.buildhistory_dir = cwd
+    if not os.path.exists(options.buildhistory_dir):
         sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % options.buildhistory_dir)
         parser.print_help()
         sys.exit(1)
@@ -81,7 +92,7 @@
 
     import gitdb
     try:
-        changes = oe.buildhistory_analysis.process_changes(options.buildhistory_dir, fromrev, torev, options.report_all, options.report_ver)
+        changes = oe.buildhistory_analysis.process_changes(options.buildhistory_dir, fromrev, torev, options.report_all, options.report_ver, options.sigs, options.sigsdiff)
     except gitdb.exc.BadObject as e:
         if len(args) == 1:
             sys.stderr.write("Unable to find previous build revision in buildhistory repository\n\n")
diff --git a/import-layers/yocto-poky/scripts/buildstats-diff b/import-layers/yocto-poky/scripts/buildstats-diff
index f918a6d..adeba44 100755
--- a/import-layers/yocto-poky/scripts/buildstats-diff
+++ b/import-layers/yocto-poky/scripts/buildstats-diff
@@ -22,7 +22,6 @@
 import re
 import sys
 from collections import namedtuple
-from datetime import datetime, timedelta, tzinfo
 from operator import attrgetter
 
 # Setup logging
@@ -35,38 +34,11 @@
     pass
 
 
-class TimeZone(tzinfo):
-    """Simple fixed-offset tzinfo"""
-    def __init__(self, seconds, name):
-        self._offset = timedelta(seconds=seconds)
-        self._name = name
-
-    def utcoffset(self, dt):
-        return self._offset
-
-    def tzname(self, dt):
-        return self._name
-
-    def dst(self, dt):
-        return None
-
-TIMEZONES = {'UTC': TimeZone(0, 'UTC'),
-             'EET': TimeZone(7200, 'EET'),
-             'EEST': TimeZone(10800, 'EEST')}
-
 taskdiff_fields = ('pkg', 'pkg_op', 'task', 'task_op', 'value1', 'value2',
                    'absdiff', 'reldiff')
 TaskDiff = namedtuple('TaskDiff', ' '.join(taskdiff_fields))
 
 
-def to_datetime_obj(obj):
-    """Helper for getting timestamps in datetime format"""
-    if isinstance(obj, datetime):
-        return obj
-    else:
-        return datetime.utcfromtimestamp(obj).replace(tzinfo=TIMEZONES['UTC'])
-
-
 class BSTask(dict):
     def __init__(self, *args, **kwargs):
         self['start_time'] = None
@@ -86,7 +58,7 @@
     @property
     def walltime(self):
         """Elapsed wall clock time"""
-        return self['elapsed_time'].total_seconds()
+        return self['elapsed_time']
 
     @property
     def read_bytes(self):
@@ -118,10 +90,10 @@
             key, val = line.split(':', 1)
             val = val.strip()
             if key == 'Started':
-                start_time = to_datetime_obj(float(val))
+                start_time = float(val)
                 bs_task['start_time'] = start_time
             elif key == 'Ended':
-                end_time = to_datetime_obj(float(val))
+                end_time = float(val)
             elif key.startswith('IO '):
                 split = key.split()
                 bs_task['iostat'][split[1]] = int(val)
@@ -329,7 +301,7 @@
         print("\nEPOCH CHANGED:")
         print("--------------")
         for pkg in sorted(echanged):
-            field1 = "{} -> {}".format(pkg, bs1[pkg]['epoch'], bs2[pkg]['epoch'])
+            field1 = "{} -> {}".format(bs1[pkg]['epoch'], bs2[pkg]['epoch'])
             field2 = "{} -> {}".format(bs1[pkg]['nevr'], bs2[pkg]['nevr'])
             print(fmt_str.format(pkg, field1, field2, maxlen=maxlen))
 
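
With the format-string fix above, the epoch column now renders as
'old-epoch -> new-epoch'; previously the extra argument meant the package name
and old epoch were printed instead. An invocation sketch (snapshot paths
hypothetical):

    # Compare task buildstats between two builds of the same image
    buildstats-diff tmp/buildstats/core-image-minimal-20170101010101 \
                    tmp/buildstats/core-image-minimal-20170202020202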
diff --git a/import-layers/yocto-poky/scripts/cleanup-workdir b/import-layers/yocto-poky/scripts/cleanup-workdir
deleted file mode 100755
index 98769f6..0000000
--- a/import-layers/yocto-poky/scripts/cleanup-workdir
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) 2012 Wind River Systems, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-import os
-import sys
-import optparse
-import re
-import subprocess
-import shutil
-
-pkg_cur_dirs = {}
-obsolete_dirs = []
-parser = None
-
-def err_quit(msg):
-    print(msg)
-    parser.print_usage()
-    sys.exit(1)
-
-def parse_version(verstr):
-    elems = verstr.split(':')
-    epoch = elems[0]
-    if len(epoch) == 0:
-        return elems[1]
-    else:
-        return epoch + '_' + elems[1]
-
-def run_command(cmd):
-    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
-    output = pipe.communicate()[0]
-    if pipe.returncode != 0:
-        print("Execute command '%s' failed." % cmd)
-        sys.exit(1)
-    return output.decode('utf-8')
-
-def get_cur_arch_dirs(workdir, arch_dirs):
-    pattern = workdir + '/(.*?)/'
-
-    cmd = "bitbake -e | grep ^SDK_SYS="
-    output = run_command(cmd)
-    sdk_sys = output.split('"')[1]
-
-    # select thest 5 packages to get the dirs of current arch
-    pkgs = ['hicolor-icon-theme', 'base-files', 'acl-native', 'binutils-crosssdk-' + sdk_sys, 'nativesdk-autoconf']
-
-    for pkg in pkgs:
-        cmd = "bitbake -e " + pkg + " | grep ^IMAGE_ROOTFS="
-        output = run_command(cmd)
-        output = output.split('"')[1]
-        m = re.match(pattern, output)
-        arch_dirs.append(m.group(1))
-
-def main():
-    global parser
-    parser = optparse.OptionParser(
-        usage = """%prog
-
-%prog removes the obsolete packages' build directories in WORKDIR.
-This script must be ran under BUILDDIR after source file \"oe-init-build-env\".
-
-Any file or directory under WORKDIR which is not created by Yocto
-will be deleted. Be CAUTIOUS.""")
-
-    options, args = parser.parse_args(sys.argv)
-
-    builddir = run_command('echo $BUILDDIR').strip()
-    if len(builddir) == 0:
-        err_quit("Please source file \"oe-init-build-env\" first.\n")
-
-    if os.getcwd() != builddir:
-        err_quit("Please run %s under: %s\n" % (os.path.basename(args[0]), builddir))
-
-    print('Updating bitbake caches...')
-    cmd = "bitbake -s"
-    output = run_command(cmd)
-
-    output = output.split('\n')
-    index = 0
-    while len(output[index]) > 0:
-        index += 1
-    alllines = output[index+1:]
-
-    for line in alllines:
-        # empty again means end of the versions output
-        if len(line) == 0:
-            break
-        line = line.strip()
-        line = re.sub('\s+', ' ', line)
-        elems = line.split(' ')
-        if len(elems) == 2:
-            version = parse_version(elems[1])
-        else:
-            version = parse_version(elems[2])
-        pkg_cur_dirs[elems[0]] = version
-
-    cmd = "bitbake -e"
-    output = run_command(cmd)
-
-    tmpdir = None
-    image_rootfs = None
-    output = output.split('\n')
-    for line in output:
-        if tmpdir and image_rootfs:
-            break
-
-        if not tmpdir:
-            m = re.match('TMPDIR="(.*)"', line)
-            if m:
-                tmpdir = m.group(1)
-
-        if not image_rootfs:
-            m = re.match('IMAGE_ROOTFS="(.*)"', line)
-            if m:
-                image_rootfs = m.group(1)
-
-    # won't fail just in case
-    if not tmpdir or not image_rootfs:
-        print("Can't get TMPDIR or IMAGE_ROOTFS.")
-        return 1
-
-    pattern = tmpdir + '/(.*?)/(.*?)/'
-    m = re.match(pattern, image_rootfs)
-    if not m:
-        print("Can't get WORKDIR.")
-        return 1
-
-    workdir = os.path.join(tmpdir, m.group(1))
-
-    # we only deal the dirs of current arch, total numbers of dirs are 6
-    cur_arch_dirs = [m.group(2)]
-    get_cur_arch_dirs(workdir, cur_arch_dirs)
-
-    for workroot, dirs, files in os.walk(workdir):
-        # For the files, they should NOT exist in WORKDIR. Remove them.
-        for f in files:
-            obsolete_dirs.append(os.path.join(workroot, f))
-
-        for d in dirs:
-            if d not in cur_arch_dirs:
-                continue
-
-            for pkgroot, pkgdirs, filenames in os.walk(os.path.join(workroot, d)):
-                for f in filenames:
-                    obsolete_dirs.append(os.path.join(pkgroot, f))
-
-                for pkgdir in sorted(pkgdirs):
-                    if pkgdir not in pkg_cur_dirs:
-                        obsolete_dirs.append(os.path.join(pkgroot, pkgdir))
-                    else:
-                        for verroot, verdirs, verfiles in os.walk(os.path.join(pkgroot, pkgdir)):
-                            for f in verfiles:
-                                obsolete_dirs.append(os.path.join(pkgroot, f))
-                            for v in sorted(verdirs):
-                               if v not in pkg_cur_dirs[pkgdir]:
-                                   obsolete_dirs.append(os.path.join(pkgroot, pkgdir, v))
-                            break
-
-                # just process the top dir of every package under tmp/work/*/,
-                # then jump out of the above os.walk()
-                break
-
-        # it is convenient to use os.walk() to get dirs and files at same time
-        # both of them have been dealed in the loop, so jump out
-        break
-
-    for d in obsolete_dirs:
-        print("Deleting %s" % d)
-        shutil.rmtree(d, True)
-
-    if len(obsolete_dirs):
-        print('\nTotal %d items.' % len(obsolete_dirs))
-    else:
-        print('\nNo obsolete directory found under %s.' % workdir)
-
-    return 0
-
-if __name__ == '__main__':
-    try:
-        ret = main()
-    except Exception:
-        ret = 2
-        import traceback
-        traceback.print_exc()
-    sys.exit(ret)
diff --git a/import-layers/yocto-poky/scripts/combo-layer b/import-layers/yocto-poky/scripts/combo-layer
index b90bfc8..d04d88b 100755
--- a/import-layers/yocto-poky/scripts/combo-layer
+++ b/import-layers/yocto-poky/scripts/combo-layer
@@ -294,6 +294,8 @@
                     # again. Uses the list of files created by tar (easier
                     # than walking the tree).
                     for file in files.split('\n'):
+                        if file.endswith(os.path.sep):
+                            continue
                         for pattern in exclude_patterns:
                             if fnmatch.fnmatch(file, pattern):
                                 os.unlink(os.path.join(*([extract_dir] + ['..'] * subdir_components + [file])))
@@ -329,7 +331,7 @@
                         # one. The commit should be in both repos with
                         # the same tree, but better check here.
                         tree = runcmd('git show -s --pretty=format:%%T %s' % rev).strip()
-                        with tempfile.NamedTemporaryFile() as editor:
+                        with tempfile.NamedTemporaryFile(mode='wt') as editor:
                             editor.write('''cat >$1 <<EOF
 tree %s
 author %s
@@ -353,7 +355,7 @@
                 # Optional: rewrite history to change commit messages or to move files.
                 if 'hook' in repo or dest_dir != ".":
                     filter_branch = ['git', 'filter-branch', '--force']
-                    with tempfile.NamedTemporaryFile() as hookwrapper:
+                    with tempfile.NamedTemporaryFile(mode='wt') as hookwrapper:
                         if 'hook' in repo:
                             # Create a shell script wrapper around the original hook that
                             # can be used by git filter-branch. Hook may or may not have
@@ -426,7 +428,7 @@
                 merge.append(name)
                 # Root all commits which have no parent in the common
                 # ancestor in the new repository.
-                for start in runcmd('git log --pretty=format:%%H --max-parents=0 %s' % name).split('\n'):
+                for start in runcmd('git log --pretty=format:%%H --max-parents=0 %s --' % name).split('\n'):
                     runcmd('git replace --graft %s %s' % (start, startrev))
             try:
                 runcmd(merge)
@@ -1137,7 +1139,7 @@
             if hook:
                 # Need to turn the verbatim commit message into something resembling a patch header
                 # for the hook.
-                with tempfile.NamedTemporaryFile(delete=False) as patch:
+                with tempfile.NamedTemporaryFile(mode='wt', delete=False) as patch:
                     patch.write('Subject: [PATCH] ')
                     patch.write(body)
                     patch.write('\n---\n')
diff --git a/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats-plot.sh b/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats-plot.sh
new file mode 100755
index 0000000..7e8ae04
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats-plot.sh
@@ -0,0 +1,157 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2011, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# DESCRIPTION
+#
+# Produces script data to be consumed by gnuplot. There are two possible plots
+# depending if either the -S parameter is present or not:
+#
+#     * without -S: Produces a histogram listing top N recipes/tasks versus
+#       stats. The first stat defined in the -s parameter is the one taken
+#       into account for ranking
+#     * -S: Produces a histogram listing tasks versus stats.  In this case,
+#       the value of each stat is the sum for that particular stat in all recipes found.
+#       Stat values are sorted in descending order by the first stat defined in -s
+#
+# EXAMPLES
+#
+# 1. Top recipes' tasks taking into account utime
+#
+#     $ buildstats-plot.sh -s utime | gnuplot -p
+#
+# 2. Tasks versus utime:stime
+#
+#     $ buildstats-plot.sh -s utime:stime -S | gnuplot -p
+#
+# 3. Tasks versus IO write_bytes:IO read_bytes
+#
+#     $ buildstats-plot.sh -s 'IO write_bytes:IO read_bytes' -S | gnuplot -p
+#
+# AUTHORS
+# Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
+#
+
+set -o nounset
+set -o errexit
+
+BS_DIR="tmp/buildstats"
+N=10
+STATS="utime"
+SUM=""
+OUTDATA_FILE="$PWD/buildstats-plot.out"
+
+function usage {
+    CMD=$(basename $0)
+    cat <<EOM
+Usage: $CMD [-b buildstats_dir] [-n N] [-s stats] [-S] [-o outfile] [-h]
+  -b buildstats The path where the folder resides
+                (default: "$BS_DIR")
+  -n N          Top N recipes to display. Ignored if -S is present
+                (default: "$N")
+  -s stats      The stats to be matched. If more than one stat is given, units
+                should be the same because data is plotted as a histogram.
+                (see buildstats.sh -h for all options) or any other defined
+                (build)stat separated by colons, i.e. stime:utime
+                (default: "$STATS")
+  -S            Sum values for a particular stat for found recipes
+  -o            Output data file.
+                (default: "$OUTDATA_FILE")
+  -h            Display this help message
+EOM
+}
+
+# Parse and validate arguments
+while getopts "b:n:s:o:Sh" OPT; do
+	case $OPT in
+	b)
+		BS_DIR="$OPTARG"
+		;;
+	n)
+		N="$OPTARG"
+		;;
+	s)
+	        STATS="$OPTARG"
+	        ;;
+	S)
+	        SUM="y"
+	        ;;
+	o)
+	        OUTDATA_FILE="$OPTARG"
+	        ;;
+	h)
+		usage
+		exit 0
+		;;
+	*)
+		usage
+		exit 1
+		;;
+	esac
+done
+
+# Get number of stats
+IFS=':'; statsarray=(${STATS}); unset IFS
+nstats=${#statsarray[@]}
+
+# Get script folder, use to run buildstats.sh
+CD=$(dirname $0)
+
+# Parse buildstats recipes to produce a single table
+OUTBUILDSTATS="$PWD/buildstats.log"
+$CD/buildstats.sh -H -s "$STATS" > $OUTBUILDSTATS
+
+# Get headers
+HEADERS=$(cat $OUTBUILDSTATS | sed -n -e '1s/ /-/g' -e '1s/:/ /gp')
+
+echo -e "set boxwidth 0.9 relative"
+echo -e "set style data histograms"
+echo -e "set style fill solid 1.0 border lt -1"
+echo -e "set xtics rotate by 45 right"
+
+# Get output data
+if [ -z "$SUM" ]; then
+    cat $OUTBUILDSTATS | sed -e '1d' | sort -k3 -n -r | head -$N > $OUTDATA_FILE
+    # include task at recipe column
+    sed -i -e "1i\
+${HEADERS}" $OUTDATA_FILE
+    echo -e "set title \"Top task/recipes\""
+    echo -e "plot for [COL=3:`expr 3 + ${nstats} - 1`] '${OUTDATA_FILE}' using COL:xtic(stringcolumn(1).' '.stringcolumn(2)) title columnheader(COL)"
+else
+
+    # Construct datamash sum arguments (sum 3 sum 4 ...)
+    declare -a sumargs
+    j=0
+    for i in `seq $nstats`; do
+	sumargs[j]=sum; j=$(( $j + 1 ))
+	sumargs[j]=`expr 3 + $i - 1`;  j=$(( $j + 1 ))
+    done
+
+    # Do the processing with datamash
+    cat $OUTBUILDSTATS | sed -e '1d' | datamash -t ' ' -g1 ${sumargs[*]} | sort -k2 -n -r > $OUTDATA_FILE
+
+    # Include headers into resulted file, so we can include gnuplot xtics
+    HEADERS=$(echo $HEADERS | sed -e 's/recipe//1')
+    sed -i -e "1i\
+${HEADERS}" $OUTDATA_FILE
+
+    # Plot
+    echo -e "set title \"Sum stats values per task for all recipes\""
+    echo -e "plot for [COL=2:`expr 2 + ${nstats} - 1`] '${OUTDATA_FILE}' using COL:xtic(1) title columnheader(COL)"
+fi
+
diff --git a/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats.sh b/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats.sh
index 96158a9..8d7e248 100755
--- a/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats.sh
+++ b/import-layers/yocto-poky/scripts/contrib/bb-perf/buildstats.sh
@@ -18,24 +18,40 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 #
 # DESCRIPTION
-# Given a 'buildstats' path (created by bitbake when setting
-# USER_CLASSES ?= "buildstats" on local.conf) and task names, outputs
-# '<task> <recipe> <elapsed time>' for all recipes. Elapsed times are in
-# seconds, and task should be given without the 'do_' prefix.
+# Given 'buildstats' data (generated by bitbake when setting
+# USER_CLASSES ?= "buildstats" in local.conf), task names and stats values
+# (these are the ones present in the buildstats files), outputs
+# '<task> <recipe> <value_1> <value_2> ... <value_n>'. The units are the ones
+# defined in buildstats, which in turn takes data from /proc/[pid] files
 #
 # Some useful pipelines
 #
-# 1. Tasks with largest elapsed times
-# $ buildstats.sh -b <buildstats> | sort -k3 -n -r | head
+# 1. Tasks with largest stime (Amount of time that this process has been scheduled
+#    in kernel mode) values
+# $ buildstats.sh -b <buildstats> -s stime | sort -k3 -n -r | head
 #
-# 2. Min, max, sum per task (in needs GNU datamash)
-# $ buildstats.sh -b <buildstats> | datamash -t' ' -g1 min 3 max 3 sum 3 | sort -k4 -n -r
+# 2. Min, max, sum utime (Amount of time that this process has been scheduled
+#    in user mode) per task (needs GNU datamash)
+# $ buildstats.sh -b <buildstats> -s utime | datamash -t' ' -g1 min 3 max 3 sum 3 | sort -k4 -n -r
 #
 # AUTHORS
 # Leonardo Sandoval <leonardo.sandoval.gonzalez@linux.intel.com>
 #
+
+# Stats, by type
+TIME="utime:stime:cutime:cstime"
+IO="IO wchar:IO write_bytes:IO syscr:IO read_bytes:IO rchar:IO syscw:IO cancelled_write_bytes"
+RUSAGE="rusage ru_utime:rusage ru_stime:rusage ru_maxrss:rusage ru_minflt:rusage ru_majflt:\
+rusage ru_inblock:rusage ru_oublock:rusage ru_nvcsw:rusage ru_nivcsw"
+
+CHILD_RUSAGE="Child rusage ru_utime:Child rusage ru_stime:Child rusage ru_maxrss:Child rusage ru_minflt:\
+Child rusage ru_majflt:Child rusage ru_inblock:Child rusage ru_oublock:Child rusage ru_nvcsw:\
+Child rusage ru_nivcsw"
+
 BS_DIR="tmp/buildstats"
 TASKS="compile:configure:fetch:install:patch:populate_lic:populate_sysroot:unpack"
+STATS="$TIME"
+HEADER="" # No header by default
 
 function usage {
 CMD=$(basename $0)
@@ -45,12 +61,20 @@
                 (default: "$BS_DIR")
   -t tasks      The tasks to be computed
                 (default: "$TASKS")
+  -s stats      The stats to be matched. Options: TIME, IO, RUSAGE, CHILD_RUSAGE
+                or any other defined buildstat separated by colons, i.e. stime:utime
+                (default: "$STATS")
+                Default stat sets:
+                    TIME=$TIME
+                    IO=$IO
+                    RUSAGE=$RUSAGE
+                    CHILD_RUSAGE=$CHILD_RUSAGE
   -h            Display this help message
 EOM
 }
 
 # Parse and validate arguments
-while getopts "b:t:h" OPT; do
+while getopts "b:t:s:Hh" OPT; do
 	case $OPT in
 	b)
 		BS_DIR="$OPTARG"
@@ -58,6 +82,12 @@
 	t)
 		TASKS="$OPTARG"
 		;;
+	s)
+		STATS="$OPTARG"
+		;;
+	H)
+	        HEADER="y"
+	        ;;
 	h)
 		usage
 		exit 0
@@ -76,15 +106,50 @@
 	exit 1
 fi
 
-RECIPE_FIELD=1
-TIME_FIELD=4
+stats=""
+IFS=":"
+for stat in ${STATS}; do
+	case $stat in
+	    TIME)
+		stats="${stats}:${TIME}"
+		;;
+	    IO)
+		stats="${stats}:${IO}"
+		;;
+	    RUSAGE)
+		stats="${stats}:${RUSAGE}"
+		;;
+	    CHILD_RUSAGE)
+		stats="${stats}:${CHILD_RUSAGE}"
+		;;
+	    *)
+		stats="${STATS}"
+	esac
+done
 
-tasks=(${TASKS//:/ })
-for task in "${tasks[@]}"; do
+# remove possible colon at the beginning
+stats="$(echo "$stats" | sed -e 's/^://1')"
+
+# Provide a header if required by the user
+[ -n "$HEADER" ] && { echo "task:recipe:$stats"; }
+
+for task in ${TASKS}; do
     task="do_${task}"
-    for file in $(find ${BS_DIR} -type f -name ${task}); do
-        recipe=$(sed -n -e "/$task/p" ${file} | cut -d ':' -f${RECIPE_FIELD})
-        time=$(sed -n -e "/$task/p" ${file} | cut -d ':' -f${TIME_FIELD} | cut -d ' ' -f2)
-        echo "${task} ${recipe} ${time}"
+    for file in $(find ${BS_DIR} -type f -name ${task} | awk 'BEGIN{ ORS=""; OFS=":" } { print $0,"" }'); do
+        recipe="$(basename $(dirname $file))"
+	times=""
+	for stat in ${stats}; do
+	    [ -z "$stat" ] && { echo "empty stats"; }
+	    time=$(sed -n -e "s/^\($stat\): \\(.*\\)/\\2/p" $file)
+	    # in case the stat is not present, set the value as NA
+	    [ -z "$time" ] && { time="NA"; }
+	    # Append it to times
+	    if [ -z "$times" ]; then
+		times="${time}"
+	    else
+		times="${times} ${time}"
+	    fi
+	done
+        echo "${task} ${recipe} ${times}"
     done
 done
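
A sketch of the extended interface added above (stat names from the predefined
sets):

    # Header row plus utime/stime per task, ready for datamash or gnuplot
    ./buildstats.sh -b tmp/buildstats -s utime:stime -H

    # Sum utime per task across all recipes (needs GNU datamash)
    ./buildstats.sh -b tmp/buildstats -s utime | \
        datamash -t ' ' -g1 sum 3 | sort -k2 -n -r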
diff --git a/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh b/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh
index e03ea97..3da3253 100755
--- a/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh
+++ b/import-layers/yocto-poky/scripts/contrib/build-perf-test-wrapper.sh
@@ -19,6 +19,7 @@
 # oe-build-perf-test and archives the results.
 
 script=`basename $0`
+script_dir=$(realpath $(dirname $0))
 archive_dir=~/perf-results/archives
 
 usage () {
@@ -29,29 +30,44 @@
   -h                show this help and exit.
   -a ARCHIVE_DIR    archive results tarball here, give an empty string to
                     disable tarball archiving (default: $archive_dir)
-  -c COMMITISH      test (checkout) this commit
+  -c COMMITISH      test (checkout) this commit, <branch>:<commit> can be
+                    specified to test a specific commit of a certain branch
   -C GIT_REPO       commit results into Git
+  -E EMAIL_ADDR     send email report
+  -P GIT_REMOTE     push results to a remote Git repository
   -w WORK_DIR       work dir for this script
                     (default: GIT_TOP_DIR/build-perf-test)
+  -x                create xml report (instead of json)
 EOF
 }
 
+get_os_release_var () {
+    ( source /etc/os-release; eval echo '$'$1 )
+}
+
 
 # Parse command line arguments
 commitish=""
-while getopts "ha:c:C:w:" opt; do
+oe_build_perf_test_extra_opts=()
+oe_git_archive_extra_opts=()
+while getopts "ha:c:C:E:P:w:x" opt; do
     case $opt in
         h)  usage
             exit 0
             ;;
-        a)  archive_dir=`realpath "$OPTARG"`
+        a)  archive_dir=`realpath -s "$OPTARG"`
             ;;
         c)  commitish=$OPTARG
             ;;
-        C)  results_repo=`realpath "$OPTARG"`
-            commit_results=("--commit-results" "$results_repo")
+        C)  results_repo=`realpath -s "$OPTARG"`
             ;;
-        w)  base_dir=`realpath "$OPTARG"`
+        E)  email_to="$OPTARG"
+            ;;
+        P)  oe_git_archive_extra_opts+=("--push" "$OPTARG")
+            ;;
+        w)  base_dir=`realpath -s "$OPTARG"`
+            ;;
+        x)  oe_build_perf_test_extra_opts+=("--xml")
             ;;
         *)  usage
             exit 1
@@ -67,6 +83,17 @@
     exit 1
 fi
 
+# Open a file descriptor for flock and acquire lock
+LOCK_FILE="/tmp/oe-build-perf-test-wrapper.lock"
+if ! exec 3> "$LOCK_FILE"; then
+    echo "ERROR: Unable to open lock file"
+    exit 1
+fi
+if ! flock -n 3; then
+    echo "ERROR: Another instance of this script is running"
+    exit 1
+fi
+
 echo "Running on `uname -n`"
 if ! git_topdir=$(git rev-parse --show-toplevel); then
         echo "The current working dir doesn't seem to be a git clone. Please cd there before running `basename $0`"
@@ -76,15 +103,33 @@
 cd "$git_topdir"
 
 if [ -n "$commitish" ]; then
-    # Checkout correct revision
-    echo "Checking out $commitish"
+    echo "Running git fetch"
     git fetch &> /dev/null
     git checkout HEAD^0 &> /dev/null
-    git branch -D $commitish &> /dev/null
-    if ! git checkout -f $commitish &> /dev/null; then
-        echo "Git checkout failed"
+
+    # Handle <branch>:<commit> format
+    if echo "$commitish" | grep -q ":"; then
+        commit=`echo "$commitish" | cut -d":" -f2`
+        branch=`echo "$commitish" | cut -d":" -f1`
+    else
+        commit="$commitish"
+        branch="$commitish"
+    fi
+
+    echo "Checking out $commitish"
+    git branch -D $branch &> /dev/null
+    if ! git checkout -f $branch &> /dev/null; then
+        echo "ERROR: Git checkout failed"
         exit 1
     fi
+
+    # Check that the specified branch really contains the commit
+    commit_hash=`git rev-parse --revs-only $commit --`
+    if [ -z "$commit_hash" -o "`git merge-base $branch $commit`" != "$commit_hash" ]; then
+        echo "ERROR: branch $branch does not contain commit $commit"
+        exit 1
+    fi
+    git reset --hard $commit > /dev/null
 fi
 
 # Setup build environment
@@ -119,10 +164,8 @@
 # Run actual test script
 oe-build-perf-test --out-dir "$results_dir" \
                    --globalres-file "$globalres_log" \
-                   --lock-file "$base_dir/oe-build-perf.lock" \
-                   "${commit_results[@]}" \
-                   --commit-results-branch "{tester_host}/{git_branch}/$machine" \
-                   --commit-results-tag "{tester_host}/{git_branch}/$machine/{git_commit_count}-g{git_commit}/{tag_num}"
+                   "${oe_build_perf_test_extra_opts[@]}" \
+                   --lock-file "$base_dir/oe-build-perf.lock"
 
 case $? in
     1)  echo "ERROR: oe-build-perf-test script failed!"
@@ -132,6 +175,29 @@
         ;;
 esac
 
+# Commit results to git
+if [ -n "$results_repo" ]; then
+    echo -e "\nArchiving results in $results_repo"
+    oe-git-archive \
+        --git-dir "$results_repo" \
+        --branch-name "{hostname}/{branch}/{machine}" \
+        --tag-name "{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}" \
+        --exclude "buildstats.json" \
+        --notes "buildstats/{branch_name}" "$results_dir/buildstats.json" \
+        "${oe_git_archive_extra_opts[@]}" \
+        "$results_dir"
+
+    # Send email report
+    if [ -n "$email_to" ]; then
+        echo -e "\nEmailing test report"
+        os_name=`get_os_release_var PRETTY_NAME`
+        oe-build-perf-report -r "$results_repo" > report.txt
+        oe-build-perf-report -r "$results_repo" --html > report.html
+        "$script_dir"/oe-build-perf-report-email.py --to "$email_to" --subject "Build Perf Test Report for $os_name" --text report.txt --html report.html "${OE_BUILD_PERF_REPORT_EMAIL_EXTRA_ARGS[@]}"
+    fi
+fi
+
+
 echo -ne "\n\n-----------------\n"
 echo "Global results file:"
 echo -ne "\n"
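
A combined sketch of the new wrapper options (remote name, email address and
results repo path hypothetical):

    # Check out commit 0123abc on master, run the perf tests, archive the
    # results with oe-git-archive, push them, and email the report
    ./scripts/contrib/build-perf-test-wrapper.sh -c master:0123abc \
        -C ~/perf-results/repo -P origin -E builds@example.com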
diff --git a/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py b/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py
index 389fb97..7ce7186 100755
--- a/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py
+++ b/import-layers/yocto-poky/scripts/contrib/list-packageconfig-flags.py
@@ -76,7 +76,7 @@
     for fn in data_dict:
         pkgconfigflags = data_dict[fn].getVarFlags("PACKAGECONFIG")
         pkgconfigflags.pop('doc', None)
-        pkgname = data_dict[fn].getVar("P", True)
+        pkgname = data_dict[fn].getVar("P")
         pkg_dict[pkgname] = sorted(pkgconfigflags.keys())
 
     return pkg_dict
@@ -124,9 +124,9 @@
     ''' Display all pkgs and PACKAGECONFIG information '''
     print(str("").ljust(50, '='))
     for fn in data_dict:
-        print('%s' % data_dict[fn].getVar("P", True))
+        print('%s' % data_dict[fn].getVar("P"))
         print(fn)
-        packageconfig = data_dict[fn].getVar("PACKAGECONFIG", True) or ''
+        packageconfig = data_dict[fn].getVar("PACKAGECONFIG") or ''
         if packageconfig.strip() == '':
             packageconfig = 'None'
         print('PACKAGECONFIG %s' % packageconfig)
diff --git a/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh b/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh
index d8db3c0..ac4ec9c 100755
--- a/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh
+++ b/import-layers/yocto-poky/scripts/contrib/mkefidisk.sh
@@ -20,6 +20,11 @@
 
 LANG=C
 
+echo
+echo "WARNING: This script is deprecated and will be removed soon."
+echo "Please consider using wic EFI images instead."
+echo
+
 # Set to 1 to enable additional output
 DEBUG=0
 OUT="/dev/null"
@@ -379,7 +384,7 @@
 cp $HDDIMG_MNT/vmlinuz $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy vmlinuz"
 # Copy the efi loader and configs (booti*.efi and grub.cfg if it exists)
 cp -r $HDDIMG_MNT/EFI $BOOTFS_MNT >$OUT 2>&1 || error "Failed to copy EFI dir"
-# Silently ignore a missing gummiboot loader dir (we might just be a GRUB image)
+# Silently ignore a missing systemd-boot loader dir (we might just be a GRUB image)
 cp -r $HDDIMG_MNT/loader $BOOTFS_MNT >$OUT 2>&1
 
 # Update the boot loaders configurations for an installed image
@@ -405,25 +410,25 @@
 	sed -i "s@vmlinuz @vmlinuz root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $GRUB_CFG
 fi
 
-# Look for a gummiboot installation
-GUMMI_ENTRIES="$BOOTFS_MNT/loader/entries"
-GUMMI_CFG="$GUMMI_ENTRIES/boot.conf"
-if [ -d "$GUMMI_ENTRIES" ]; then
-	info "Configuring Gummiboot"
+# Look for a systemd-boot installation
+SYSTEMD_BOOT_ENTRIES="$BOOTFS_MNT/loader/entries"
+SYSTEMD_BOOT_CFG="$SYSTEMD_BOOT_ENTRIES/boot.conf"
+if [ -d "$SYSTEMD_BOOT_ENTRIES" ]; then
+	info "Configuring SystemD-boot"
 	# remove the install target if it exists
-	rm $GUMMI_ENTRIES/install.conf >$OUT 2>&1
+	rm $SYSTEMD_BOOT_ENTRIES/install.conf >$OUT 2>&1
 
-	if [ ! -e "$GUMMI_CFG" ]; then
-		echo "ERROR: $GUMMI_CFG not found"
+	if [ ! -e "$SYSTEMD_BOOT_CFG" ]; then
+		echo "ERROR: $SYSTEMD_BOOT_CFG not found"
 	fi
 
-	sed -i "/initrd /d" $GUMMI_CFG
-	sed -i "s@ root=[^ ]*@ @" $GUMMI_CFG
-	sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $GUMMI_CFG
+	sed -i "/initrd /d" $SYSTEMD_BOOT_CFG
+	sed -i "s@ root=[^ ]*@ @" $SYSTEMD_BOOT_CFG
+	sed -i "s@options *LABEL=boot @options LABEL=Boot root=$TARGET_ROOTFS ro rootwait console=ttyS0 console=tty0 @" $SYSTEMD_BOOT_CFG
 fi
 
 # Ensure we have at least one EFI bootloader configured
-if [ ! -e $GRUB_CFG ] && [ ! -e $GUMMI_CFG ]; then
+if [ ! -e $GRUB_CFG ] && [ ! -e $SYSTEMD_BOOT_CFG ]; then
 	die "No EFI bootloader configuration found"
 fi
 
@@ -439,7 +444,7 @@
 fi
 
 # Add startup.nsh script for automated boot
-echo "fs0:\EFI\BOOT\bootx64.efi" > $BOOTFS_MNT/startup.nsh
+printf "fs0:\%s\BOOT\%s\n" "EFI" "bootx64.efi" > $BOOTFS_MNT/startup.nsh
 
 
 # Call cleanup to unmount devices and images and remove the TMPDIR
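
As the deprecation warning added above suggests, wic can produce an equivalent
EFI disk image. A sketch, assuming the stock mkefidisk kickstart and an
already-built image:

    # Generate an EFI disk image from existing build artifacts
    wic create mkefidisk -e core-image-minimal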
diff --git a/import-layers/yocto-poky/scripts/contrib/oe-build-perf-report-email.py b/import-layers/yocto-poky/scripts/contrib/oe-build-perf-report-email.py
new file mode 100755
index 0000000..261ca51
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/contrib/oe-build-perf-report-email.py
@@ -0,0 +1,269 @@
+#!/usr/bin/python3
+#
+# Send build performance test report emails
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import argparse
+import base64
+import logging
+import os
+import pwd
+import re
+import shutil
+import smtplib
+import socket
+import subprocess
+import sys
+import tempfile
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger('oe-build-perf-report')
+
+
+# Find the JS scraper script
+SCRAPE_JS = os.path.join(os.path.dirname(__file__), '..', 'lib', 'build_perf',
+                         'scrape-html-report.js')
+if not os.path.isfile(SCRAPE_JS):
+    log.error("Unable to find %s", SCRAPE_JS)
+    sys.exit(1)
+
+
+class ReportError(Exception):
+    """Local errors"""
+    pass
+
+
+def check_utils():
+    """Check that all needed utils are installed in the system"""
+    missing = []
+    for cmd in ('phantomjs', 'optipng'):
+        if not shutil.which(cmd):
+            missing.append(cmd)
+    if missing:
+        log.error("The following tools are missing: %s", ' '.join(missing))
+        sys.exit(1)
+
+
+def parse_args(argv):
+    """Parse command line arguments"""
+    description = """Email build perf test report"""
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        description=description)
+
+    parser.add_argument('--debug', '-d', action='store_true',
+                        help="Verbose logging")
+    parser.add_argument('--quiet', '-q', action='store_true',
+                        help="Only print errors")
+    parser.add_argument('--to', action='append',
+                        help="Recipients of the email")
+    parser.add_argument('--subject', default="Yocto build perf test report",
+                        help="Email subject")
+    parser.add_argument('--outdir', '-o',
+                        help="Store files in OUTDIR. Can be used to preserve "
+                             "the email parts")
+    parser.add_argument('--text',
+                        help="Plain text message")
+    parser.add_argument('--html',
+                        help="HTML report generated by oe-build-perf-report")
+    parser.add_argument('--phantomjs-args', action='append',
+                        help="Extra command line arguments passed to PhantomJS")
+
+    args = parser.parse_args(argv)
+
+    if not args.html and not args.text:
+        parser.error("Please specify --html and/or --text")
+
+    return args
+
+
+def decode_png(infile, outfile):
+    """Parse/decode/optimize png data from a html element"""
+    with open(infile) as f:
+        raw_data = f.read()
+
+    # Grab raw base64 data
+    b64_data = re.sub('^.*href="data:image/png;base64,', '', raw_data, 1)
+    b64_data = re.sub('">.+$', '', b64_data, 1)
+
+    # Replace file with proper decoded png
+    with open(outfile, 'wb') as f:
+        f.write(base64.b64decode(b64_data))
+
+    subprocess.check_output(['optipng', outfile], stderr=subprocess.STDOUT)
+
+
+def encode_png(pngfile):
+    """Encode png into a <img> html element"""
+    with open(pngfile, 'rb') as f:
+        data = f.read()
+
+    b64_data = base64.b64encode(data)
+    return '<img src="data:image/png;base64,' + b64_data.decode('utf-8') + '">\n'
+
+
+def mangle_html_report(infile, outfile, pngs):
+    """Mangle html file into a email compatible format"""
+    paste = True
+    png_dir = os.path.dirname(outfile)
+    with open(infile) as f_in:
+        with open(outfile, 'w') as f_out:
+            for line in f_in.readlines():
+                stripped = line.strip()
+                # Strip out scripts
+                if stripped == '<!--START-OF-SCRIPTS-->':
+                    paste = False
+                elif stripped == '<!--END-OF-SCRIPTS-->':
+                    paste = True
+                elif paste:
+                    if re.match('^.+href="data:image/png;base64', stripped):
+                        # Strip out encoded pngs (as they're huge in size)
+                        continue
+                    elif 'www.gstatic.com' in stripped:
+                        # HACK: drop references to external static pages
+                        continue
+
+                    # Replace charts with <img> elements
+                    match = re.match('<div id="(?P<id>\w+)"', stripped)
+                    if match and match.group('id') in pngs:
+                        #f_out.write('<img src="{}">\n'.format(match.group('id') + '.png'))
+                        png_file = os.path.join(png_dir, match.group('id') + '.png')
+                        f_out.write(encode_png(png_file))
+                    else:
+                        f_out.write(line)
+
+
+def scrape_html_report(report, outdir, phantomjs_extra_args=None):
+    """Scrape html report into a format sendable by email"""
+    tmpdir = tempfile.mkdtemp(dir='.')
+    log.debug("Using tmpdir %s for phantomjs output", tmpdir)
+
+    if not os.path.isdir(outdir):
+        os.mkdir(outdir)
+    if os.path.splitext(report)[1] not in ('.html', '.htm'):
+        raise ReportError("Invalid file extension for report, needs to be "
+                          "'.html' or '.htm'")
+
+    try:
+        log.info("Scraping HTML report with PhantomJS")
+        extra_args = phantomjs_extra_args if phantomjs_extra_args else []
+        subprocess.check_output(['phantomjs', '--debug=true'] + extra_args +
+                                [SCRAPE_JS, report, tmpdir],
+                                stderr=subprocess.STDOUT)
+
+        pngs = []
+        attachments = []
+        for fname in os.listdir(tmpdir):
+            base, ext = os.path.splitext(fname)
+            if ext == '.png':
+                log.debug("Decoding %s", fname)
+                decode_png(os.path.join(tmpdir, fname),
+                           os.path.join(outdir, fname))
+                pngs.append(base)
+                attachments.append(fname)
+            elif ext in ('.html', '.htm'):
+                report_file = fname
+            else:
+                log.warning("Unknown file extension: '%s'", ext)
+                #shutil.move(os.path.join(tmpdir, fname), outdir)
+
+        log.debug("Mangling html report file %s", report_file)
+        mangle_html_report(os.path.join(tmpdir, report_file),
+                           os.path.join(outdir, report_file), pngs)
+        return report_file, attachments
+    finally:
+        shutil.rmtree(tmpdir)
+
+def send_email(text_fn, html_fn, subject, recipients):
+    """Send email"""
+    # Generate email message
+    text_msg = html_msg = None
+    if text_fn:
+        with open(text_fn) as f:
+            text_msg = MIMEText("Yocto build performance test report.\n" +
+                                f.read(), 'plain')
+    if html_fn:
+        with open(html_fn) as f:
+            html_msg = MIMEText(f.read(), 'html')
+
+    if text_msg and html_msg:
+        msg = MIMEMultipart('alternative')
+        msg.attach(text_msg)
+        msg.attach(html_msg)
+    elif text_msg:
+        msg = text_msg
+    elif html_msg:
+        msg = html_msg
+    else:
+        raise ReportError("Neither plain text nor html body specified")
+
+    pw_data = pwd.getpwuid(os.getuid())
+    full_name = pw_data.pw_gecos.split(',')[0]
+    email = os.environ.get('EMAIL',
+                           '{}@{}'.format(pw_data.pw_name, socket.getfqdn()))
+    msg['From'] = "{} <{}>".format(full_name, email)
+    msg['To'] = ', '.join(recipients)
+    msg['Subject'] = subject
+
+    # Send email
+    with smtplib.SMTP('localhost') as smtp:
+        smtp.send_message(msg)
+
+
+def main(argv=None):
+    """Script entry point"""
+    args = parse_args(argv)
+    if args.quiet:
+        log.setLevel(logging.ERROR)
+    if args.debug:
+        log.setLevel(logging.DEBUG)
+
+    check_utils()
+
+    if args.outdir:
+        outdir = args.outdir
+        if not os.path.exists(outdir):
+            os.mkdir(outdir)
+    else:
+        outdir = tempfile.mkdtemp(dir='.')
+
+    try:
+        log.debug("Storing email parts in %s", outdir)
+        html_report = None
+        if args.html:
+            scrape_html_report(args.html, outdir, args.phantomjs_args)
+            html_report = os.path.join(outdir, os.path.basename(args.html))
+
+        if args.to:
+            log.info("Sending email to %s", ', '.join(args.to))
+            send_email(args.text, html_report, args.subject, args.to)
+    except subprocess.CalledProcessError as err:
+        log.error("%s, with output:\n%s", str(err), err.output.decode())
+        return 1
+    except ReportError as err:
+        log.error(err)
+        return 1
+    finally:
+        if not args.outdir:
+            log.debug("Wiping %s", outdir)
+            shutil.rmtree(outdir)
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
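
An invocation sketch matching the argument parser above (recipient and file
names hypothetical; phantomjs and optipng must be installed, per check_utils):

    # Email a combined plain-text/HTML report and keep the generated parts
    ./oe-build-perf-report-email.py --to dev@example.com \
        --subject "Build perf test report" \
        --text report.txt --html report.html --outdir email-parts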
diff --git a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py
index f2ecf8d..8c3655d 100755
--- a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py
+++ b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-2.7.py
@@ -9,10 +9,14 @@
 #  * Updated to no longer generate special -dbg package, instead use the
 #    single system -dbg
 #  * Update version with ".1" to indicate this change
+#
+# February 26, 2017 -- Ming Liu <peter.x.liu@external.atlascopco.com>
+#  * Updated to support generating manifest for native python
 
 import os
 import sys
 import time
+import argparse
 
 VERSION = "2.7.2"
 
@@ -21,16 +25,16 @@
 
 class MakefileMaker:
 
-    def __init__( self, outfile ):
+    def __init__( self, outfile, isNative ):
         """initialize"""
         self.packages = {}
         self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
+        self.isNative = isNative
         self.output = outfile
         self.out( """
 # WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
-# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
-# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
-""" % ( sys.argv[0], __version__ ) )
+# Generator: '%s%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
+""" % ( sys.argv[0], ' --native' if isNative else '', __version__ ) )
 
     #
     # helper functions
@@ -66,6 +70,20 @@
         global VERSION
 
         #
+        # generate rprovides line for native
+        #
+
+        if self.isNative:
+            rprovideLine = 'RPROVIDES+="'
+            for name in sorted(self.packages):
+                rprovideLine += "%s-native " % name.replace( '${PN}', 'python' )
+            rprovideLine += '"'
+
+            self.out( rprovideLine )
+            self.out( "" )
+            return
+
+        #
         # generate provides line
         #
 
@@ -147,17 +165,21 @@
         self.doEpilog()
 
 if __name__ == "__main__":
+    parser = argparse.ArgumentParser( description='generate python manifest' )
+    parser.add_argument( '-n', '--native', help='generate manifest for native python', action='store_true' )
+    parser.add_argument( 'outfile', metavar='OUTPUT_FILE', nargs='?', default='', help='Output file (defaults to stdout)' )
+    args = parser.parse_args()
 
-    if len( sys.argv ) > 1:
+    if args.outfile:
         try:
-            os.unlink(sys.argv[1])
+            os.unlink( args.outfile )
         except Exception:
             sys.exc_clear()
-        outfile = open( sys.argv[1], "w" )
+        outfile = open( args.outfile, "w" )
     else:
         outfile = sys.stdout
 
-    m = MakefileMaker( outfile )
+    m = MakefileMaker( outfile, args.native )
 
     # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
     # Parameters: revision, name, description, dependencies, filenames
@@ -171,7 +193,7 @@
     "UserDict.* UserList.* UserString.* " +
     "lib-dynload/binascii.so lib-dynload/_struct.so lib-dynload/time.so " +
     "lib-dynload/xreadlines.so types.* platform.* ${bindir}/python* "  +
-    "_weakrefset.* sysconfig.* _sysconfigdata.* config/Makefile " +
+    "_weakrefset.* sysconfig.* _sysconfigdata.* " +
     "${includedir}/python${PYTHON_MAJMIN}/pyconfig*.h " +
     "${libdir}/python${PYTHON_MAJMIN}/sitecustomize.py ")
 
@@ -185,7 +207,8 @@
     "${base_libdir}/*.a " +
     "${base_libdir}/*.o " +
     "${datadir}/aclocal " +
-    "${datadir}/pkgconfig " )
+    "${datadir}/pkgconfig " +
+    "config/Makefile ")
 
     m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
     "${bindir}/2to3 lib2to3" ) # package
@@ -360,7 +383,7 @@
     m.addPackage( "${PN}-terminal", "Python terminal controlling support", "${PN}-core ${PN}-io",
     "pty.* tty.*" )
 
-    m.addPackage( "${PN}-tests", "Python tests", "${PN}-core",
+    m.addPackage( "${PN}-tests", "Python tests", "${PN}-core ${PN}-modules",
     "test" ) # package
 
     m.addPackage( "${PN}-threading", "Python threading & synchronization support", "${PN}-core ${PN}-lang",
diff --git a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py
index 71a71f7..075860c 100755
--- a/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py
+++ b/import-layers/yocto-poky/scripts/contrib/python/generate-manifest-3.5.py
@@ -13,9 +13,13 @@
 # 2014 Khem Raj <raj.khem@gmail.com>
 # Added python3 support
 #
+# February 26, 2017 -- Ming Liu <peter.x.liu@external.atlascopco.com>
+# * Updated to support generating manifest for native python3
+
 import os
 import sys
 import time
+import argparse
 
 VERSION = "3.5.0"
 
@@ -24,16 +28,16 @@
 
 class MakefileMaker:
 
-    def __init__( self, outfile ):
+    def __init__( self, outfile, isNative ):
         """initialize"""
         self.packages = {}
         self.targetPrefix = "${libdir}/python%s/" % VERSION[:3]
+        self.isNative = isNative
         self.output = outfile
         self.out( """
 # WARNING: This file is AUTO GENERATED: Manual edits will be lost next time I regenerate the file.
-# Generator: '%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
-# Visit the Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy
-""" % ( sys.argv[0], __version__ ) )
+# Generator: '%s%s' Version %s (C) 2002-2010 Michael 'Mickey' Lauer <mlauer@vanille-media.de>
+""" % ( sys.argv[0], ' --native' if isNative else '', __version__ ) )
 
     #
     # helper functions
@@ -59,16 +63,40 @@
         for filename in filenames:
             if filename[0] != "$":
                 fullFilenames.append( "%s%s" % ( self.targetPrefix, filename ) )
+                fullFilenames.append( "%s%s" % ( self.targetPrefix,
+                                                 self.pycachePath( filename ) ) )
             else:
                 fullFilenames.append( filename )
         self.packages[name] = description, dependencies, fullFilenames
 
+    def pycachePath( self, filename ):
+        dirname = os.path.dirname( filename )
+        basename = os.path.basename( filename )
+        if '.' in basename:
+            return os.path.join( dirname, '__pycache__', basename )
+        else:
+            return os.path.join( dirname, basename, '__pycache__' )
+
     def doBody( self ):
         """generate body of Makefile"""
 
         global VERSION
 
         #
+        # generate rprovides line for native
+        #
+
+        if self.isNative:
+            rprovideLine = 'RPROVIDES+="'
+            for name in sorted(self.packages):
+                rprovideLine += "%s-native " % name.replace( '${PN}', 'python3' )
+            rprovideLine += '"'
+
+            self.out( rprovideLine )
+            self.out( "" )
+            return
+
+        #
         # generate provides line
         #
 
@@ -150,17 +178,21 @@
         self.doEpilog()
 
 if __name__ == "__main__":
+    parser = argparse.ArgumentParser( description='generate python3 manifest' )
+    parser.add_argument( '-n', '--native', help='generate manifest for native python3', action='store_true' )
+    parser.add_argument( 'outfile', metavar='OUTPUT_FILE', nargs='?', default='', help='Output file (defaults to stdout)' )
+    args = parser.parse_args()
 
-    if len( sys.argv ) > 1:
+    if args.outfile:
         try:
-            os.unlink(sys.argv[1])
+            os.unlink( args.outfile )
         except Exception:
             sys.exc_clear()
-        outfile = open( sys.argv[1], "w" )
+        outfile = open( args.outfile, "w" )
     else:
         outfile = sys.stdout
 
-    m = MakefileMaker( outfile )
+    m = MakefileMaker( outfile, args.native )
 
     # Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
     # Parameters: revision, name, description, dependencies, filenames
@@ -174,7 +206,7 @@
     "UserDict.* UserList.* UserString.* " +
     "lib-dynload/binascii.*.so lib-dynload/_struct.*.so lib-dynload/time.*.so " +
     "lib-dynload/xreadlines.*.so types.* platform.* ${bindir}/python* "  + 
-    "_weakrefset.* sysconfig.* _sysconfigdata.* config/Makefile " +
+    "_weakrefset.* sysconfig.* _sysconfigdata.* " +
     "${includedir}/python${PYTHON_BINABI}/pyconfig*.h " +
     "${libdir}/python${PYTHON_MAJMIN}/collections " +
     "${libdir}/python${PYTHON_MAJMIN}/_collections_abc.* " +
@@ -191,7 +223,8 @@
     "${base_libdir}/*.a " +
     "${base_libdir}/*.o " +
     "${datadir}/aclocal " +
-    "${datadir}/pkgconfig " )
+    "${datadir}/pkgconfig " +
+    "config/Makefile ")
 
     m.addPackage( "${PN}-2to3", "Python automated Python 2 to 3 code translator", "${PN}-core",
     "lib2to3" ) # package
@@ -266,7 +299,7 @@
     "lib-dynload/fcntl.*.so" )
 
     m.addPackage( "${PN}-html", "Python HTML processing support", "${PN}-core",
-    "formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
+    "formatter.* htmlentitydefs.* html htmllib.* markupbase.* sgmllib.* HTMLParser.* " )
 
     m.addPackage( "${PN}-importlib", "Python import implementation library", "${PN}-core ${PN}-lang",
     "importlib imp.*" )
@@ -279,7 +312,7 @@
 
     m.addPackage( "${PN}-io", "Python low-level I/O", "${PN}-core ${PN}-math",
     "lib-dynload/_socket.*.so lib-dynload/_io.*.so lib-dynload/_ssl.*.so lib-dynload/select.*.so lib-dynload/termios.*.so lib-dynload/cStringIO.*.so " +
-    "pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
+    "ipaddress.* pipes.* socket.* ssl.* tempfile.* StringIO.* io.* _pyio.*" )
 
     m.addPackage( "${PN}-json", "Python JSON support", "${PN}-core ${PN}-math ${PN}-re",
     "json lib-dynload/_json.*.so" ) # package
@@ -308,18 +341,18 @@
     m.addPackage( "${PN}-multiprocessing", "Python multiprocessing support", "${PN}-core ${PN}-io ${PN}-lang ${PN}-pickle ${PN}-threading ${PN}-ctypes ${PN}-mmap",
     "lib-dynload/_multiprocessing.*.so multiprocessing" ) # package
 
-    m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime",
+    m.addPackage( "${PN}-netclient", "Python Internet Protocol clients", "${PN}-argparse ${PN}-core ${PN}-crypt ${PN}-datetime ${PN}-io ${PN}-lang ${PN}-logging ${PN}-mime ${PN}-html",
     "*Cookie*.* " +
-    "base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib  uuid.* rfc822.* mimetools.*" )
+    "base64.* cookielib.* ftplib.* gopherlib.* hmac.* http* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib  uuid.* rfc822.* mimetools.*" )
 
     m.addPackage( "${PN}-netserver", "Python Internet Protocol servers", "${PN}-core ${PN}-netclient ${PN}-shell ${PN}-threading",
-    "cgi.* *HTTPServer.* SocketServer.*" )
+    "cgi.* socketserver.* *HTTPServer.* SocketServer.*" )
 
     m.addPackage( "${PN}-numbers", "Python number APIs", "${PN}-core ${PN}-lang ${PN}-re",
     "decimal.* fractions.* numbers.*" )
 
     m.addPackage( "${PN}-pickle", "Python serialisation/persistence support", "${PN}-core ${PN}-codecs ${PN}-io ${PN}-re",
-    "pickle.* shelve.* lib-dynload/cPickle.*.so pickletools.*" )
+    "_compat_pickle.* pickle.* shelve.* lib-dynload/cPickle.*.so pickletools.*" )
 
     m.addPackage( "${PN}-pkgutil", "Python package extension utility support", "${PN}-core",
     "pkgutil.*")
@@ -378,6 +411,9 @@
     m.addPackage( "${PN}-tkinter", "Python Tcl/Tk bindings", "${PN}-core",
     "lib-dynload/_tkinter.*.so lib-tk tkinter" ) # package
 
+    m.addPackage( "${PN}-typing", "Python typing support", "${PN}-core",
+    "typing.*" )
+
     m.addPackage( "${PN}-unittest", "Python unit testing framework", "${PN}-core ${PN}-stringold ${PN}-lang ${PN}-io ${PN}-difflib ${PN}-pprint ${PN}-shell",
     "unittest/" )
 
@@ -387,7 +423,7 @@
     m.addPackage( "${PN}-xml", "Python basic XML support", "${PN}-core ${PN}-re",
     "lib-dynload/_elementtree.*.so lib-dynload/pyexpat.*.so xml xmllib.*" ) # package
 
-    m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang",
+    m.addPackage( "${PN}-xmlrpc", "Python XML-RPC support", "${PN}-core ${PN}-xml ${PN}-netserver ${PN}-lang ${PN}-pydoc",
     "xmlrpclib.* SimpleXMLRPCServer.* DocXMLRPCServer.* xmlrpc" )
 
     m.addPackage( "${PN}-mailbox", "Python mailbox format support", "${PN}-core ${PN}-mime",
diff --git a/import-layers/yocto-poky/scripts/contrib/verify-homepage.py b/import-layers/yocto-poky/scripts/contrib/verify-homepage.py
index d39dd1d..76f1749 100755
--- a/import-layers/yocto-poky/scripts/contrib/verify-homepage.py
+++ b/import-layers/yocto-poky/scripts/contrib/verify-homepage.py
@@ -44,7 +44,7 @@
             if realfn in checked:
                 continue
             data = bbhandler.parse_recipe_file(realfn)
-            homepage = data.getVar("HOMEPAGE", True)
+            homepage = data.getVar("HOMEPAGE")
             if homepage:
                 try:
                     urllib.request.urlopen(homepage, timeout=5)
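This follows the bitbake API update in this release: getVar()'s expand parameter now defaults to True, so the explicit second argument is redundant. Both forms return the expanded value (data here is the datastore returned by parse_recipe_file, as in the hunk above):

    homepage = data.getVar("HOMEPAGE", True)  # old call, explicit expansion
    homepage = data.getVar("HOMEPAGE")        # new call, expand defaults to True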
diff --git a/import-layers/yocto-poky/scripts/contrib/yocto-bsp-kernel-update.sh b/import-layers/yocto-poky/scripts/contrib/yocto-bsp-kernel-update.sh
new file mode 100755
index 0000000..b3aa705
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/contrib/yocto-bsp-kernel-update.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+#
+# Copyright (c) 2017, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software;  you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY;  without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program;  if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+# Description: creates a new set of kernel templates for a new kernel version based on an existing one
+#
+
+set -o nounset
+set -o errexit
+
+if [ $# -ne 4 ]; then
+    cat << EOF
+usage: $0 from_major from_minor to_major to_minor
+EOF
+    exit 1
+else
+    fma=$1 # from major
+    fmi=$2 # from minor
+    tma=$3 # to major
+    tmi=$4 # to minor
+fi
+
+poky=$(readlink -e $(dirname $(dirname $(dirname $0))))
+arch=$poky/scripts/lib/bsp/substrate/target/arch
+
+
+# copy/rename templates
+for from in $(ls -1 $arch/*/recipes-kernel/linux/linux-yocto*_$fma\.$fmi.bbappend)
+do
+    to=$(echo $from | sed s/$fma\.$fmi/$tma\.$tmi/)
+    cp $from $to
+done
+
+# replace version strings inside the new templates
+for bbappend in $(ls -1 $arch/*/recipes-kernel/linux/linux-yocto*_$tma\.$tmi.bbappend)
+do
+    sed -i 1s/$fma\.$fmi/$tma\.$tmi/ $bbappend
+    sed -i \$s/$fma\.$fmi/$tma\.$tmi/ $bbappend
+done
+
+# update the noinstall files
+for noinstall in $(ls -1 $arch/*/recipes-kernel/linux/kernel-list.noinstall)
+do
+    sed -i s/$fma\.$fmi/$tma\.$tmi/g $noinstall;
+done
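For reference, the rename/substitution this script automates is the same one performed across the template tree later in this commit (4.8 -> 4.10); that run would look like:

    $ ./scripts/contrib/yocto-bsp-kernel-update.sh 4 8 4 10

i.e. from_major from_minor to_major to_minor, matching the usage string above.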
diff --git a/import-layers/yocto-poky/scripts/create-pull-request b/import-layers/yocto-poky/scripts/create-pull-request
index a88f35a..e82858b 100755
--- a/import-layers/yocto-poky/scripts/create-pull-request
+++ b/import-layers/yocto-poky/scripts/create-pull-request
@@ -168,7 +168,7 @@
 		WEB_URL="http://git.pokylinux.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
 		;;
 	*git.openembedded.org*)
-		WEB_URL="http://cgit.openembedded.org/cgit.cgi/$REMOTE_REPO/log/?h=$BRANCH"
+		WEB_URL="http://cgit.openembedded.org/$REMOTE_REPO/log/?h=$BRANCH"
 		;;
 	*github.com*)
 		WEB_URL="https://github.com/$REMOTE_REPO/tree/$BRANCH"
diff --git a/import-layers/yocto-poky/scripts/crosstap b/import-layers/yocto-poky/scripts/crosstap
index 58317cf..39739bb 100755
--- a/import-layers/yocto-poky/scripts/crosstap
+++ b/import-layers/yocto-poky/scripts/crosstap
@@ -104,13 +104,19 @@
   | cut -d '=' -f2 | cut -d '"' -f2)
 STAGING_BINDIR_TOOLPREFIX=$(echo "$BITBAKE_VARS" | grep ^TARGET_PREFIX \
   | cut -d '=' -f2 | cut -d '"' -f2)
-SYSTEMTAP_HOST_INSTALLDIR=$(echo "$BITBAKE_VARS" | grep ^STAGING_DIR_NATIVE \
-  | cut -d '=' -f2 | cut -d '"' -f2)
 TARGET_ARCH=$(echo "$BITBAKE_VARS" | grep ^TRANSLATED_TARGET_ARCH \
   | cut -d '=' -f2 | cut -d '"' -f2)
 TARGET_KERNEL_BUILDDIR=$(echo "$BITBAKE_VARS" | grep ^B= \
   | cut -d '=' -f2 | cut -d '"' -f2)
 
+# Build and populate the recipe-sysroot-native with systemtap-native
+pushd $PWD
+cd $BUILDDIR
+BITBAKE_VARS=`bitbake -e systemtap-native`
+popd
+SYSTEMTAP_HOST_INSTALLDIR=$(echo "$BITBAKE_VARS" | grep ^STAGING_DIR_NATIVE \
+  | cut -d '=' -f2 | cut -d '"' -f2)
+
 systemtap_target_arch "$TARGET_ARCH"
 
 if [ ! -d $TARGET_KERNEL_BUILDDIR ] ||
@@ -125,6 +131,7 @@
     echo -e "\nError: Native (host) systemtap not found."
     echo -e "Did you accidentally build a local non-sdk image? (or forget to"
     echo -e "add 'tools-profile' to EXTRA_IMAGE_FEATURES in your local.conf)?"
+    echo -e "You can also: bitbake -c addto_recipe_sysroot systemtap-native"
     setup_usage
     exit 1
 fi
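With the recipe-specific sysroots introduced in this release, STAGING_DIR_NATIVE is only meaningful in the context of a particular recipe, which is why the script now re-runs bitbake -e against systemtap-native before reading it. Populating that sysroot up front matches the new hint above:

    $ bitbake -c addto_recipe_sysroot systemtap-native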
diff --git a/import-layers/yocto-poky/scripts/devtool b/import-layers/yocto-poky/scripts/devtool
index 0c32c50..c9ad9dd 100755
--- a/import-layers/yocto-poky/scripts/devtool
+++ b/import-layers/yocto-poky/scripts/devtool
@@ -215,6 +215,9 @@
     global config
     global context
 
+    if sys.getfilesystemencoding() != "utf-8":
+        sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
+
     context = Context(fixed_setup=False)
 
     # Default basepath
@@ -288,13 +291,17 @@
     scriptutils.logger_setup_color(logger, global_args.color)
 
     if global_args.bbpath is None:
-        tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
         try:
-            global_args.bbpath = tinfoil.config_data.getVar('BBPATH', True)
-        finally:
-            tinfoil.shutdown()
+            tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
+            try:
+                global_args.bbpath = tinfoil.config_data.getVar('BBPATH')
+            finally:
+                tinfoil.shutdown()
+        except bb.BBHandledException:
+            return 2
 
-    for path in [scripts_path] + global_args.bbpath.split(':'):
+    # Search BBPATH first to allow layers to override plugins in scripts_path
+    for path in global_args.bbpath.split(':') + [scripts_path]:
         pluginpath = os.path.join(path, 'lib', 'devtool')
         scriptutils.load_plugins(logger, plugins, pluginpath)
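The new encoding check at devtool startup can be satisfied by exporting a UTF-8 locale before invocation (assuming en_US.UTF-8 is installed on the host; any UTF-8 locale works):

    $ export LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8
    $ devtool --help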
 
diff --git a/import-layers/yocto-poky/scripts/gen-lockedsig-cache b/import-layers/yocto-poky/scripts/gen-lockedsig-cache
index 49de74e..6765891 100755
--- a/import-layers/yocto-poky/scripts/gen-lockedsig-cache
+++ b/import-layers/yocto-poky/scripts/gen-lockedsig-cache
@@ -62,7 +62,11 @@
         os.remove(dst)
     if (os.stat(src).st_dev == os.stat(destdir).st_dev):
         print('linking')
-        os.link(src, dst)
+        try:
+            os.link(src, dst)
+        except OSError as e:
+            print('hard linking failed, copying')
+            shutil.copyfile(src, dst)
     else:
         print('copying')
         shutil.copyfile(src, dst)
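The hunk implements a link-or-copy pattern: prefer a hard link when source and destination share a device, but fall back to a byte copy if linking fails (some filesystems and mount options forbid hard links even on the same device). A self-contained sketch of the same logic, with an illustrative helper name:

    import os
    import shutil

    def link_or_copy(src, dst):
        # Hard links only work within one filesystem; compare device ids first.
        if os.stat(src).st_dev == os.stat(os.path.dirname(dst) or '.').st_dev:
            try:
                os.link(src, dst)
                return
            except OSError:
                pass  # e.g. EPERM on filesystems that restrict hard links
        shutil.copyfile(src, dst)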
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/kernel.py b/import-layers/yocto-poky/scripts/lib/bsp/kernel.py
index 32cab3b..a3ee325 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/kernel.py
+++ b/import-layers/yocto-poky/scripts/lib/bsp/kernel.py
@@ -331,7 +331,6 @@
     patch list [${machine}-user-patches.scc].
     """
     f = open_user_file(scripts_path, machine, machine+"-user-patches.scc", "w")
-    f.write("mark patching start\n")
     for item in patch_items:
         f.write("patch " + item + "\n")
     f.close()
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf
index 588367a..624750c 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/conf/machine/machine.conf
@@ -8,10 +8,8 @@
 {{ if xserver == "y": }}
 PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
 XSERVER ?= "xserver-xorg \
-           xf86-input-evdev \
-           xf86-input-mouse \
            xf86-video-fbdev \
-           xf86-input-keyboard"
+           "
 
 MACHINE_EXTRA_RRECOMMENDS = " kernel-modules kernel-devicetree"
 
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index f4fc219..d15a178 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
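The same three-line template change repeats for every kernel version and architecture below: the SRCREV assignments are now active (defaulting to ${AUTOREV}) instead of commented out, and KERNEL_VERSION_SANITY_SKIP suppresses the linux-yocto version sanity check that an unpinned AUTOREV build would otherwise trip. Once a generated BSP is pinned, the intent (with illustrative placeholder ids) is roughly:

    SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} = "<machine branch commit id>"
    SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} = "<meta branch commit id>"
    # ...with the KERNEL_VERSION_SANITY_SKIP line removed, per its comment.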
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..5cc82e8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,35 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 4ec3e48..070bd87 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index 51cb012..c391322 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend
index 1d78865..4e7d7cb 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#LINUX_VERSION = "4.1"
\ No newline at end of file
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..eaf4367
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 1725bf4..56e8ad3 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 15d6431..59752a9 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/arm/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg
index 3b168b7..fe5b882 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/files/machine.cfg
@@ -1,5 +1,8 @@
 # yocto-bsp-filename {{=machine}}.cfg
 CONFIG_X86_32=y
+# Must explicitly disable 64BIT
+# CONFIG_64BIT is not set
+
 CONFIG_MATOM=y
 CONFIG_PRINTK=y
 
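The added comment line is functional, not decorative: kernel config fragments express "n" with the literal "is not set" comment, and simply omitting CONFIG_64BIT would let the symbol take its default when fragments are merged and audited. The fragment therefore must carry the line verbatim:

    # CONFIG_64BIT is not set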
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index f4fc219..d15a178 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..5cc82e8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,35 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 4ec3e48..070bd87 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index 51cb012..c391322 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend
index 8d7e24e..5ed144b 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..0205920
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard:standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 16a24f0..ab644bd 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 9a97de9..a535aea 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/i386/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb
index 5fbf594..e534d36 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/layer/recipes-example/example/example-recipe-0.1.bb
@@ -14,7 +14,7 @@
 S = "${WORKDIR}"
 
 do_compile() {
-	     ${CC} helloworld.c -o helloworld
+	     ${CC} ${LDFLAGS} helloworld.c -o helloworld
 }
 
 do_install() {
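Passing ${LDFLAGS} to the link step lets the distro's linker flags take effect; in a typical poky target environment those include hash-style and as-needed options, roughly:

    ${CC} ${LDFLAGS} helloworld.c -o helloworld
    # LDFLAGS commonly expands to something like: -Wl,-O1 -Wl,--hash-style=gnu -Wl,--as-needed

Without them the resulting binary typically fails the do_package_qa ldflags check (missing GNU_HASH).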
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf
index b319d62..37da253 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/conf/machine/machine.conf
@@ -27,7 +27,6 @@
 {{ if xserver == "y": }}
 PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
 XSERVER ?= "xserver-xorg \
-            xf86-input-evdev \
             xf86-video-fbdev"
 
 SERIAL_CONSOLE = "115200 ttyS0"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index f4fc219..d15a178 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..5cc82e8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,35 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 4ec3e48..070bd87 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index 51cb012..c391322 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend
index f4efb75..4e7d7cb 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..eaf4367
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 1725bf4..56e8ad3 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 15d6431..59752a9 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf
index 3afc5e0..a8eea2c 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/conf/machine/machine.conf
@@ -27,7 +27,6 @@
 {{ if xserver == "y": }}
 PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
 XSERVER ?= "xserver-xorg \
-            xf86-input-evdev \
             xf86-video-fbdev"
 
 SERIAL_CONSOLE = "115200 ttyS0"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index f4fc219..d15a178 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..5cc82e8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,35 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 4ec3e48..070bd87 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index 51cb012..c391322 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend
index 7d0f7df..802e5f4 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..512b597
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/edgerouter" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 4d89f5c..dbb0fd5 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 17d1ea6..c2eb40d 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/mips64/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf
index 583c5e4..352b972 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/conf/machine/machine.conf
@@ -71,7 +71,6 @@
 {{ if xserver == "y": }}
 PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg"
 XSERVER ?= "xserver-xorg \
-           xf86-input-evdev \
            xf86-video-fbdev"
 
 PREFERRED_VERSION_u-boot ?= "v2016.01%"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index f4fc219..d15a178 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..5cc82e8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,35 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 4ec3e48..070bd87 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index 51cb012..c391322 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend
index f4efb75..4e7d7cb 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..eaf4367
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 1725bf4..56e8ad3 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 15d6431..59752a9 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/powerpc/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf
index 67e1cbd..9188858 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/conf/machine/machine.conf
@@ -54,9 +54,6 @@
 SERIAL_CONSOLE = "115200 ttyS0"
 XSERVER = "xserver-xorg \
            ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'mesa-driver-swrast', '', d)} \
-           xf86-input-vmmouse \
-           xf86-input-keyboard \
-           xf86-input-evdev \
            xf86-video-vmware"
 
 {{ if qemuarch == "arm": }}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index fe22d3e..81392ce 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -57,6 +57,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..29ad17b
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,64 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"arm" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/common-pc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 828afe5..a73b1aa 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -57,6 +57,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index df65fd0..7d40671 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -57,6 +57,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend
index 6895788..a9fd9ec 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -56,6 +56,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..5873da4
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,63 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base your new BSP branch on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "arm": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose an existing machine branch to use for this BSP:" default:"standard/arm-versatile-926ejs" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "powerpc": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"powerpc" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/qemuppc" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "i386": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"i386" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard"  prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "x86_64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"x86_64" gen:"bsp.kernel.all_branches" branches_base:"standard"  prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta32" }}
+
+{{ if need_new_kbranch == "n" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"existing_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/mti-malta64" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "y" and qemuarch == "mips64": }}
+{{ input type:"choicelist" name:"new_kbranch" nameappend:"mips64" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Would you like SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 3549de5..cdee773 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -56,6 +56,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 4176e5a..24c2880 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/qemu/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -56,6 +56,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
index 0120ed0..20f2059 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/kernel-list.noinstall
@@ -1,5 +1,5 @@
 {{ if kernel_choice != "custom": }}
-{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.8) kernel? (y/n)" default:"y"}}
+{{ input type:"boolean" name:"use_default_kernel" prio:"10" msg:"Would you like to use the default (4.10) kernel? (y/n)" default:"y"}}
 
 {{ if kernel_choice != "custom" and use_default_kernel == "n": }}
-{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.8"}}
+{{ input type:"choicelist" name:"kernel_choice" gen:"bsp.kernel.kernels" prio:"10" msg:"Please choose the kernel to use in this BSP:" default:"linux-yocto_4.10"}}
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
index f4fc219..d15a178 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.1.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
new file mode 100644
index 0000000..5cc82e8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.10.bbappend
@@ -0,0 +1,35 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto-tiny_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard/tiny" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/tiny/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-tiny.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-patches.scc \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
index 4ec3e48..070bd87 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.4.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
index 51cb012..c391322 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto-tiny_4.8.bbappend
@@ -28,6 +28,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto-tiny_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend
index f4efb75..4e7d7cb 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.1"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.10.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.10.bbappend
new file mode 100644
index 0000000..eaf4367
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.10.bbappend
@@ -0,0 +1,34 @@
+# yocto-bsp-filename {{ if kernel_choice == "linux-yocto_4.10": }} this
+FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+PR := "${PR}.1"
+
+COMPATIBLE_MACHINE_{{=machine}} = "{{=machine}}"
+
+{{ input type:"boolean" name:"need_new_kbranch" prio:"20" msg:"Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n]" default:"y" }}
+
+{{ if need_new_kbranch == "y": }}
+{{ input type:"choicelist" name:"new_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+{{ input type:"choicelist" name:"existing_kbranch" gen:"bsp.kernel.all_branches" branches_base:"standard" prio:"20" msg:"Please choose a machine branch to base this BSP on:" default:"standard/base" }}
+
+{{ if need_new_kbranch == "n": }}
+KBRANCH_{{=machine}}  = "{{=existing_kbranch}}"
+
+{{ input type:"boolean" name:"smp" prio:"30" msg:"Do you need SMP support? (y/n)" default:"y"}}
+{{ if smp == "y": }}
+KERNEL_FEATURES_append_{{=machine}} += " cfg/smp.scc"
+
+SRC_URI += "file://{{=machine}}-standard.scc \
+            file://{{=machine}}-user-config.cfg \
+            file://{{=machine}}-user-features.scc \
+           "
+
+# replace these SRCREVs with the real commit ids once you've had
+# the appropriate changes committed to the upstream linux-yocto repo
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+#LINUX_VERSION = "4.10"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend
index 1725bf4..56e8ad3 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.4"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend
index 15d6431..59752a9 100644
--- a/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend
+++ b/import-layers/yocto-poky/scripts/lib/bsp/substrate/target/arch/x86_64/recipes-kernel/linux/linux-yocto_4.8.bbappend
@@ -27,6 +27,8 @@
 
 # replace these SRCREVs with the real commit ids once you've had
 # the appropriate changes committed to the upstream linux-yocto repo
-#SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
-#SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_machine_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
+SRCREV_meta_pn-linux-yocto_{{=machine}} ?= "${AUTOREV}"
 #LINUX_VERSION = "4.8"
+#Remove the following line once AUTOREV is locked to a certain SRCREV
+KERNEL_VERSION_SANITY_SKIP = "1"
diff --git a/import-layers/yocto-poky/scripts/lib/build_perf/__init__.py b/import-layers/yocto-poky/scripts/lib/build_perf/__init__.py
new file mode 100644
index 0000000..1f8b729
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/build_perf/__init__.py
@@ -0,0 +1,31 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+"""Build performance test library functions"""
+
+def print_table(rows, row_fmt=None):
+    """Print data table"""
+    if not rows:
+        return
+    if not row_fmt:
+        row_fmt = ['{:{wid}} '] * len(rows[0])
+
+    # Go through the data to get maximum cell widths
+    num_cols = len(row_fmt)
+    col_widths = [0] * num_cols
+    for row in rows:
+        for i, val in enumerate(row):
+            col_widths[i] = max(col_widths[i], len(str(val)))
+
+    for row in rows:
+        print(*[row_fmt[i].format(col, wid=col_widths[i]) for i, col in enumerate(row)])
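+# Illustrative example: print_table([('pn', 'task'), ('busybox', 'do_compile')])
+# pads every column to its widest cell and prints the rows aligned:
+#   pn       task
+#   busybox  do_compile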
+
diff --git a/import-layers/yocto-poky/scripts/lib/build_perf/html.py b/import-layers/yocto-poky/scripts/lib/build_perf/html.py
new file mode 100644
index 0000000..578bb16
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/build_perf/html.py
@@ -0,0 +1,19 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+"""Helper module for HTML reporting"""
+from jinja2 import Environment, PackageLoader
+
+
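+# PackageLoader('build_perf', 'html') resolves templates from the html/
+# subdirectory of this package, i.e. the report.html and
+# measurement_chart.html files added alongside this module.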
+env = Environment(loader=PackageLoader('build_perf', 'html'))
+
+template = env.get_template('report.html')
diff --git a/import-layers/yocto-poky/scripts/lib/build_perf/html/measurement_chart.html b/import-layers/yocto-poky/scripts/lib/build_perf/html/measurement_chart.html
new file mode 100644
index 0000000..65f1a22
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/build_perf/html/measurement_chart.html
@@ -0,0 +1,50 @@
+<script type="text/javascript">
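+  // chartsDrawing is a page-global counter declared in report.html: each
+  // chart increments it here and decrements it in the 'ready' listener below,
+  // so "ALL CHARTS READY" is logged once every chart has finished drawing.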
+  chartsDrawing += 1;
+  google.charts.setOnLoadCallback(drawChart_{{ chart_elem_id }});
+  function drawChart_{{ chart_elem_id }}() {
+    var data = new google.visualization.DataTable();
+
+    // Chart options
+    var options = {
+      theme : 'material',
+      legend: 'none',
+      hAxis: { format: '', title: 'Commit number',
+               minValue: {{ chart_opts.haxis.min }},
+               maxValue: {{ chart_opts.haxis.max }} },
+      {% if measurement.type == 'time' %}
+      vAxis: { format: 'h:mm:ss' },
+      {% else %}
+      vAxis: { format: '' },
+      {% endif %}
+      pointSize: 5,
+      chartArea: { left: 80, right: 15 },
+    };
+
+    // Define data columns
+    data.addColumn('number', 'Commit');
+    data.addColumn('{{ measurement.value_type.gv_data_type }}',
+                   '{{ measurement.value_type.quantity }}');
+    // Add data rows
+    data.addRows([
+      {% for sample in measurement.samples %}
+        [{{ sample.commit_num }}, {{ sample.mean.gv_value() }}],
+      {% endfor %}
+    ]);
+
+    // Finally, draw the chart
+    chart_div = document.getElementById('{{ chart_elem_id }}');
+    var chart = new google.visualization.LineChart(chart_div);
+    google.visualization.events.addListener(chart, 'ready', function () {
+      //chart_div = document.getElementById('{{ chart_elem_id }}');
+      //chart_div.innerHTML = '<img src="' + chart.getImageURI() + '">';
+      png_div = document.getElementById('{{ chart_elem_id }}_png');
+      png_div.outerHTML = '<a id="{{ chart_elem_id }}_png" href="' + chart.getImageURI() + '">PNG</a>';
+      console.log("CHART READY: {{ chart_elem_id }}");
+      chartsDrawing -= 1;
+      if (chartsDrawing == 0)
+        console.log("ALL CHARTS READY");
+    });
+    chart.draw(data, options);
+}
+</script>
+
diff --git a/import-layers/yocto-poky/scripts/lib/build_perf/html/report.html b/import-layers/yocto-poky/scripts/lib/build_perf/html/report.html
new file mode 100644
index 0000000..165cbb8
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/build_perf/html/report.html
@@ -0,0 +1,206 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+{# Scripts, for visualization #}
+<!--START-OF-SCRIPTS-->
+<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+<script type="text/javascript">
+google.charts.load('current', {'packages':['corechart']});
+var chartsDrawing = 0;
+</script>
+
+{# Render measurement result charts #}
+{% for test in test_data %}
+  {% if test.status == 'SUCCESS' %}
+    {% for measurement in test.measurements %}
+      {% set chart_elem_id = test.name + '_' + measurement.name + '_chart' %}
+      {% include 'measurement_chart.html' %}
+    {% endfor %}
+  {% endif %}
+{% endfor %}
+
+<!--END-OF-SCRIPTS-->
+
+{# Styles #}
+<style>
+.meta-table {
+  font-size: 14px;
+  text-align: left;
+  border-collapse: collapse;
+}
+.meta-table tr:nth-child(even){background-color: #f2f2f2}
+.meta-table th, .meta-table td {
+  padding: 4px;
+}
+.summary {
+  margin: 0;
+  font-size: 14px;
+  text-align: left;
+  border-collapse: collapse;
+}
+.summary th, .summary td {
+  padding: 4px;
+}
+.measurement {
+  padding: 8px 0px 8px 8px;
+  border: 2px solid #f0f0f0;
+  margin-bottom: 10px;
+}
+.details {
+  margin: 0;
+  font-size: 12px;
+  text-align: left;
+  border-collapse: collapse;
+}
+.details th {
+  font-weight: normal;
+  padding-right: 8px;
+}
+.preformatted {
+  font-family: monospace;
+  white-space: pre-wrap;
+  background-color: #f0f0f0;
+  margin-left: 10px;
+}
+hr {
+  color: #f0f0f0;
+}
+h2 {
+  font-size: 20px;
+  margin-bottom: 0px;
+  color: #707070;
+}
+h3 {
+  font-size: 16px;
+  margin: 0px;
+  color: #707070;
+}
+</style>
+
+<title>{{ title }}</title>
+</head>
+
+{% macro poky_link(commit) -%}
+    <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?id={{ commit }}">{{ commit[0:11] }}</a>
+{%- endmacro %}
+
+<body><div style="width: 700px">
+  {# Test metadata #}
+  <h2>General</h2>
+  <hr>
+  <table class="meta-table" style="width: 100%">
+    <tr>
+      <th></th>
+      <th>Current commit</th>
+      <th>Comparing with</th>
+    </tr>
+    {% for key, item in metadata.items() %}
+    <tr>
+      <th>{{ item.title }}</th>
+      {%if key == 'commit' %}
+        <td>{{ poky_link(item.value) }}</td>
+        <td>{{ poky_link(item.value_old) }}</td>
+      {% else %}
+        <td>{{ item.value }}</td>
+        <td>{{ item.value_old }}</td>
+      {% endif %}
+    </tr>
+    {% endfor %}
+  </table>
+
+  {# Test result summary #}
+  <h2>Test result summary</h2>
+  <hr>
+  <table class="summary" style="width: 100%">
+    {% for test in test_data %}
+      {% if loop.index is even %}
+        {% set row_style = 'style="background-color: #f2f2f2"' %}
+      {% else %}
+        {% set row_style = 'style="background-color: #ffffff"' %}
+      {% endif %}
+      <tr {{ row_style }}><td>{{ test.name }}: {{ test.description }}</td>
+      {% if test.status == 'SUCCESS' %}
+        {% for measurement in test.measurements %}
+          {# add empty cell in place of the test name #}
+          {% if loop.index > 1 %}<td></td>{% endif %}
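+          {# NaN is the only value that compares unequal to itself, so the
+             elif below renders positive diffs red, other numeric diffs
+             green, and a NaN diff orange #}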
+          {% if measurement.absdiff > 0 %}
+            {% set result_style = "color: red" %}
+          {% elif measurement.absdiff == measurement.absdiff %}
+            {% set result_style = "color: green" %}
+          {% else %}
+            {% set result_style = "color: orange" %}
+          {% endif %}
+          <td>{{ measurement.description }}</td>
+          <td style="font-weight: bold">{{ measurement.value.mean }}</td>
+          <td style="{{ result_style }}">{{ measurement.absdiff_str }}</td>
+          <td style="{{ result_style }}">{{ measurement.reldiff }}</td>
+          </tr><tr {{ row_style }}>
+        {% endfor %}
+      {% else %}
+        <td style="font-weight: bold; color: red;">{{ test.status }}</td>
+        <td></td> <td></td> <td></td> <td></td>
+      {% endif %}
+      </tr>
+    {% endfor %}
+  </table>
+
+  {# Detailed test results #}
+  {% for test in test_data %}
+  <h2>{{ test.name }}: {{ test.description }}</h2>
+  <hr>
+    {% if test.status == 'SUCCESS' %}
+      {% for measurement in test.measurements %}
+        <div class="measurement">
+          <h3>{{ measurement.description }}</h3>
+          <div style="font-weight:bold;">
+            <span style="font-size: 23px;">{{ measurement.value.mean }}</span>
+            <span style="font-size: 20px; margin-left: 12px">
+            {% if measurement.absdiff > 0 %}
+            <span style="color: red">
+            {% elif measurement.absdiff == measurement.absdiff %}
+            <span style="color: green">
+            {% else %}
+            <span style="color: orange">
+            {% endif %}
+            {{ measurement.absdiff_str }} ({{ measurement.reldiff }})
+            </span></span>
+          </div>
+          <table style="width: 100%">
+            <tr>
+              <td style="width: 75%">
+                {# Linechart #}
+                <div id="{{ test.name }}_{{ measurement.name }}_chart"></div>
+              </td>
+              <td>
+                {# Measurement statistics #}
+                <table class="details">
+                  <tr>
+                    <th>Test runs</th><td>{{ measurement.value.sample_cnt }}</td>
+                  </tr><tr>
+                    <th>-/+</th><td>-{{ measurement.value.minus }} / +{{ measurement.value.plus }}</td>
+                  </tr><tr>
+                    <th>Min</th><td>{{ measurement.value.min }}</td>
+                  </tr><tr>
+                    <th>Max</th><td>{{ measurement.value.max }}</td>
+                  </tr><tr>
+                    <th>Stdev</th><td>{{ measurement.value.stdev }}</td>
+                  </tr><tr>
+                    <th><div id="{{ test.name }}_{{ measurement.name }}_chart_png"></div></th>
+                  </tr>
+                </table>
+              </td>
+            </tr>
+          </table>
+        </div>
+      {% endfor %}
+    {# Unsuccessful test #}
+    {% else %}
+      <span style="font-size: 150%; font-weight: bold; color: red;">{{ test.status }}
+      {% if test.err_type %}<span style="font-size: 75%; font-weight: normal">({{ test.err_type }})</span>{% endif %}
+      </span>
+      <div class="preformatted">{{ test.message }}</div>
+    {% endif %}
+  {% endfor %}
+</div></body>
+</html>
+
diff --git a/import-layers/yocto-poky/scripts/lib/build_perf/report.py b/import-layers/yocto-poky/scripts/lib/build_perf/report.py
new file mode 100644
index 0000000..eb00ccc
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/build_perf/report.py
@@ -0,0 +1,342 @@
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+"""Handling of build perf test reports"""
+from collections import OrderedDict, Mapping
+from datetime import datetime, timezone
+from numbers import Number
+from statistics import mean, stdev, variance
+
+
+def isofmt_to_timestamp(string):
+    """Convert timestamp string in ISO 8601 format into unix timestamp"""
+    if '.' in string:
+        dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S.%f')
+    else:
+        dt = datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
+    return dt.replace(tzinfo=timezone.utc).timestamp()
+
+
+def metadata_xml_to_json(elem):
+    """Convert metadata xml into JSON format"""
+    assert elem.tag == 'metadata', "Invalid metadata file format"
+
+    def _xml_to_json(elem):
+        """Convert xml element to JSON object"""
+        out = OrderedDict()
+        for child in elem.getchildren():
+            key = child.attrib.get('name', child.tag)
+            if len(child):
+                out[key] = _xml_to_json(child)
+            else:
+                out[key] = child.text
+        return out
+    return _xml_to_json(elem)
+
+
+def results_xml_to_json(elem):
+    """Convert results xml into JSON format"""
+    rusage_fields = ('ru_utime', 'ru_stime', 'ru_maxrss', 'ru_minflt',
+                     'ru_majflt', 'ru_inblock', 'ru_oublock', 'ru_nvcsw',
+                     'ru_nivcsw')
+    iostat_fields = ('rchar', 'wchar', 'syscr', 'syscw', 'read_bytes',
+                     'write_bytes', 'cancelled_write_bytes')
+
+    def _read_measurement(elem):
+        """Convert measurement to JSON"""
+        data = OrderedDict()
+        data['type'] = elem.tag
+        data['name'] = elem.attrib['name']
+        data['legend'] = elem.attrib['legend']
+        values = OrderedDict()
+
+        # SYSRES measurement
+        if elem.tag == 'sysres':
+            for subel in elem:
+                if subel.tag == 'time':
+                    values['start_time'] = isofmt_to_timestamp(subel.attrib['timestamp'])
+                    values['elapsed_time'] = float(subel.text)
+                elif subel.tag == 'rusage':
+                    rusage = OrderedDict()
+                    for field in rusage_fields:
+                        if 'time' in field:
+                            rusage[field] = float(subel.attrib[field])
+                        else:
+                            rusage[field] = int(subel.attrib[field])
+                    values['rusage'] = rusage
+                elif subel.tag == 'iostat':
+                    values['iostat'] = OrderedDict([(f, int(subel.attrib[f]))
+                        for f in iostat_fields])
+                elif subel.tag == 'buildstats_file':
+                    values['buildstats_file'] = subel.text
+                else:
+                    raise TypeError("Unknown sysres value element '{}'".format(subel.tag))
+        # DISKUSAGE measurement
+        elif elem.tag == 'diskusage':
+            values['size'] = int(elem.find('size').text)
+        else:
+            raise Exception("Unknown measurement tag '{}'".format(elem.tag))
+        data['values'] = values
+        return data
+
+    def _read_testcase(elem):
+        """Convert testcase into JSON"""
+        assert elem.tag == 'testcase', "Expecting 'testcase' element instead of {}".format(elem.tag)
+
+        data = OrderedDict()
+        data['name'] = elem.attrib['name']
+        data['description'] = elem.attrib['description']
+        data['status'] = 'SUCCESS'
+        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
+        data['elapsed_time'] = float(elem.attrib['time'])
+        measurements = OrderedDict()
+
+        for subel in elem.getchildren():
+            if subel.tag == 'error' or subel.tag == 'failure':
+                data['status'] = subel.tag.upper()
+                data['message'] = subel.attrib['message']
+                data['err_type'] = subel.attrib['type']
+                data['err_output'] = subel.text
+            elif subel.tag == 'skipped':
+                data['status'] = 'SKIPPED'
+                data['message'] = subel.text
+            else:
+                measurements[subel.attrib['name']] = _read_measurement(subel)
+        data['measurements'] = measurements
+        return data
+
+    def _read_testsuite(elem):
+        """Convert suite to JSON"""
+        assert elem.tag == 'testsuite', \
+                "Expecting 'testsuite' element instead of {}".format(elem.tag)
+
+        data = OrderedDict()
+        if 'hostname' in elem.attrib:
+            data['tester_host'] = elem.attrib['hostname']
+        data['start_time'] = isofmt_to_timestamp(elem.attrib['timestamp'])
+        data['elapsed_time'] = float(elem.attrib['time'])
+        tests = OrderedDict()
+
+        for case in elem.getchildren():
+            tests[case.attrib['name']] = _read_testcase(case)
+        data['tests'] = tests
+        return data
+
+    # Main function
+    assert elem.tag == 'testsuites', "Invalid test report format"
+    assert len(elem) == 1, "Too many testsuites"
+
+    return _read_testsuite(elem.getchildren()[0])
+
+
+def aggregate_metadata(metadata):
+    """Aggregate metadata into one, basically a sanity check"""
+    mutable_keys = ('pretty_name', 'version_id')
+
+    def aggregate_obj(aggregate, obj, assert_str=True):
+        """Aggregate objects together"""
+        assert type(aggregate) is type(obj), \
+                "Type mismatch: {} != {}".format(type(aggregate), type(obj))
+        if isinstance(obj, Mapping):
+            assert set(aggregate.keys()) == set(obj.keys())
+            for key, val in obj.items():
+                aggregate_obj(aggregate[key], val, key not in mutable_keys)
+        elif isinstance(obj, list):
+            assert len(aggregate) == len(obj)
+            for i, val in enumerate(obj):
+                aggregate_obj(aggregate[i], val)
+        elif not isinstance(obj, str) or assert_str:
+            assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
+
+    if not metadata:
+        return {}
+
+    # Do the aggregation
+    aggregate = metadata[0].copy()
+    for testrun in metadata[1:]:
+        aggregate_obj(aggregate, testrun)
+    aggregate['testrun_count'] = len(metadata)
+    return aggregate
+
+
+def aggregate_data(data):
+    """Aggregate multiple test results JSON structures into one"""
+
+    mutable_keys = ('status', 'message', 'err_type', 'err_output')
+
+    class SampleList(list):
+        """Container for numerical samples"""
+        pass
+
+    def new_aggregate_obj(obj):
+        """Create new object for aggregate"""
+        if isinstance(obj, Number):
+            new_obj = SampleList()
+            new_obj.append(obj)
+        elif isinstance(obj, str):
+            new_obj = obj
+        else:
+            # Lists and dicts are kept as is
+            new_obj = obj.__class__()
+            aggregate_obj(new_obj, obj)
+        return new_obj
+
+    def aggregate_obj(aggregate, obj, assert_str=True):
+        """Recursive "aggregation" of JSON objects"""
+        if isinstance(obj, Number):
+            assert isinstance(aggregate, SampleList)
+            aggregate.append(obj)
+            return
+
+        assert type(aggregate) == type(obj), \
+                "Type mismatch: {} != {}".format(type(aggregate), type(obj))
+        if isinstance(obj, Mapping):
+            for key, val in obj.items():
+                if key not in aggregate:
+                    aggregate[key] = new_aggregate_obj(val)
+                else:
+                    aggregate_obj(aggregate[key], val, key not in mutable_keys)
+        elif isinstance(obj, list):
+            for i, val in enumerate(obj):
+                if i >= len(aggregate):
+                    aggregate.append(new_aggregate_obj(val))
+                else:
+                    aggregate_obj(aggregate[i], val)
+        elif isinstance(obj, str):
+            # Sanity check for data
+            if assert_str:
+                assert aggregate == obj, "Data mismatch {} != {}".format(aggregate, obj)
+        else:
+            raise Exception("BUG: unable to aggregate '{}' ({})".format(type(obj), str(obj)))
+
+    if not data:
+        return {}
+
+    # Do the aggregation
+    aggregate = data[0].__class__()
+    for testrun in data:
+        aggregate_obj(aggregate, testrun)
+    return aggregate
+
+
+class MeasurementVal(float):
+    """Base class representing measurement values"""
+    gv_data_type = 'number'
+
+    def gv_value(self):
+        """Value formatting for visualization"""
+        if self != self:
+            return "null"
+        else:
+            return self
+
+
+class TimeVal(MeasurementVal):
+    """Class representing time values"""
+    quantity = 'time'
+    gv_title = 'elapsed time'
+    gv_data_type = 'timeofday'
+
+    def hms(self):
+        """Split time into hours, minutes and seconeds"""
+        hhh = int(abs(self) / 3600)
+        mmm = int((abs(self) % 3600) / 60)
+        sss = abs(self) % 60
+        return hhh, mmm, sss
+
+    def __str__(self):
+        if self != self:
+            return "nan"
+        hh, mm, ss = self.hms()
+        sign = '-' if self < 0 else ''
+        if hh > 0:
+            return '{}{:d}:{:02d}:{:02.0f}'.format(sign, hh, mm, ss)
+        elif mm > 0:
+            return '{}{:d}:{:04.1f}'.format(sign, mm, ss)
+        elif ss > 1:
+            return '{}{:.1f} s'.format(sign, ss)
+        else:
+            return '{}{:.2f} s'.format(sign, ss)
+
+    def gv_value(self):
+        """Value formatting for visualization"""
+        if self != self:
+            return "null"
+        hh, mm, ss = self.hms()
+        return [hh, mm, int(ss), int(ss*1000) % 1000]
+
+
+class SizeVal(MeasurementVal):
+    """Class representing time values"""
+    quantity = 'size'
+    gv_title = 'size in MiB'
+    gv_data_type = 'number'
+
+    def __str__(self):
+        if self != self:
+            return "nan"
+        if abs(self) < 1024:
+            return '{:.1f} kiB'.format(self)
+        elif abs(self) < 1048576:
+            return '{:.2f} MiB'.format(self / 1024)
+        else:
+            return '{:.2f} GiB'.format(self / 1048576)
+
+    def gv_value(self):
+        """Value formatting for visualization"""
+        if self != self:
+            return "null"
+        return self / 1024
+
+
+def measurement_stats(meas, prefix=''):
+    """Get statistics of a measurement"""
+    if not meas:
+        return {prefix + 'sample_cnt': 0,
+                prefix + 'mean': MeasurementVal('nan'),
+                prefix + 'stdev': MeasurementVal('nan'),
+                prefix + 'variance': MeasurementVal('nan'),
+                prefix + 'min': MeasurementVal('nan'),
+                prefix + 'max': MeasurementVal('nan'),
+                prefix + 'minus': MeasurementVal('nan'),
+                prefix + 'plus': MeasurementVal('nan')}
+
+    stats = {'name': meas['name']}
+    if meas['type'] == 'sysres':
+        val_cls = TimeVal
+        values = meas['values']['elapsed_time']
+    elif meas['type'] == 'diskusage':
+        val_cls = SizeVal
+        values = meas['values']['size']
+    else:
+        raise Exception("Unknown measurement type '{}'".format(meas['type']))
+    stats['val_cls'] = val_cls
+    stats['quantity'] = val_cls.quantity
+    stats[prefix + 'sample_cnt'] = len(values)
+
+    mean_val = val_cls(mean(values))
+    min_val = val_cls(min(values))
+    max_val = val_cls(max(values))
+
+    stats[prefix + 'mean'] = mean_val
+    if len(values) > 1:
+        stats[prefix + 'stdev'] = val_cls(stdev(values))
+        stats[prefix + 'variance'] = val_cls(variance(values))
+    else:
+        stats[prefix + 'stdev'] = float('nan')
+        stats[prefix + 'variance'] = float('nan')
+    stats[prefix + 'min'] = min_val
+    stats[prefix + 'max'] = max_val
+    stats[prefix + 'minus'] = val_cls(mean_val - min_val)
+    stats[prefix + 'plus'] = val_cls(max_val - mean_val)
+
+    return stats
+
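
For context, a minimal sketch of how the helpers above fit together: aggregate_data() folds the numeric fields of several test runs into SampleList containers, and measurement_stats() then summarizes one measurement. The keys below mirror the JSON shape produced by results_xml_to_json(), but the sample values are invented:

    from build_perf.report import measurement_stats  # assumes scripts/lib on sys.path

    meas = {'type': 'sysres',
            'name': 'build_time',
            'values': {'elapsed_time': [100.0, 102.5, 98.7]}}  # samples from 3 runs
    stats = measurement_stats(meas)
    print(stats['mean'])                  # TimeVal renders this as '1:40.4'
    print(stats['minus'], stats['plus'])  # spread below/above the mean
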
diff --git a/import-layers/yocto-poky/scripts/lib/build_perf/scrape-html-report.js b/import-layers/yocto-poky/scripts/lib/build_perf/scrape-html-report.js
new file mode 100644
index 0000000..05a1f57
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/build_perf/scrape-html-report.js
@@ -0,0 +1,56 @@
+var fs = require('fs');
+var system = require('system');
+var page = require('webpage').create();
+
+// Examine console log for message from chart drawing
+page.onConsoleMessage = function(msg) {
+    console.log(msg);
+    if (msg === "ALL CHARTS READY") {
+        window.charts_ready = true;
+    }
+    else if (msg.slice(0, 11) === "CHART READY") {
+        var chart_id = msg.split(" ")[2];
+        console.log('grabbing ' + chart_id);
+        var png_data = page.evaluate(function (chart_id) {
+            var chart_div = document.getElementById(chart_id + '_png');
+            return chart_div.outerHTML;
+        }, chart_id);
+        fs.write(args[2] + '/' + chart_id + '.png', png_data, 'w');
+    }
+};
+
+// Check command line arguments
+var args = system.args;
+if (args.length != 3) {
+    console.log("USAGE: " + args[0] + " REPORT_HTML OUT_DIR\n");
+    phantom.exit(1);
+}
+
+// Open the web page
+page.open(args[1], function(status) {
+    if (status == 'fail') {
+        console.log("Failed to open file '" + args[1] + "'");
+        phantom.exit(1);
+    }
+});
+
+// Check status every 100 ms
+var interval = window.setInterval(function () {
+    //console.log('waiting');
+    if (window.charts_ready) {
+        clearTimeout(timer);
+        clearInterval(interval);
+
+        var fname = args[1].replace(/\/+$/, "").split("/").pop();
+        console.log("saving " + fname);
+        fs.write(args[2] + '/' + fname, page.content, 'w');
+        phantom.exit(0);
+    }
+}, 100);
+
+// Time-out after 10 seconds
+var timer = window.setTimeout(function () {
+    clearInterval(interval);
+    console.log("ERROR: timeout");
+    phantom.exit(1);
+}, 10000);
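
The scraper above is written for PhantomJS: it loads the rendered report, waits until the charts signal readiness on the console, then writes the scraped page (and one file per chart) into the output directory. A hedged example of driving it from Python, with made-up file names:

    import subprocess
    # Assumes phantomjs is on PATH; the script itself times out after 10 s.
    subprocess.check_call(['phantomjs', 'scrape-html-report.js',
                           'build-perf-report.html', 'out-dir'])
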
diff --git a/import-layers/yocto-poky/scripts/lib/compatlayer/__init__.py b/import-layers/yocto-poky/scripts/lib/compatlayer/__init__.py
new file mode 100644
index 0000000..7197e85
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/compatlayer/__init__.py
@@ -0,0 +1,392 @@
+# Yocto Project compatibility layer tool
+#
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import re
+import subprocess
+from enum import Enum
+
+import bb.tinfoil
+
+class LayerType(Enum):
+    BSP = 0
+    DISTRO = 1
+    SOFTWARE = 2
+    ERROR_NO_LAYER_CONF = 98
+    ERROR_BSP_DISTRO = 99
+
+def _get_configurations(path):
+    configs = []
+
+    for f in os.listdir(path):
+        file_path = os.path.join(path, f)
+        if os.path.isfile(file_path) and f.endswith('.conf'):
+            configs.append(f[:-5]) # strip .conf
+    return configs
+
+def _get_layer_collections(layer_path, lconf=None, data=None):
+    import bb.parse
+    import bb.data
+
+    if lconf is None:
+        lconf = os.path.join(layer_path, 'conf', 'layer.conf')
+
+    if data is None:
+        ldata = bb.data.init()
+        bb.parse.init_parser(ldata)
+    else:
+        ldata = data.createCopy()
+
+    ldata.setVar('LAYERDIR', layer_path)
+    try:
+        ldata = bb.parse.handle(lconf, ldata, include=True)
+    except BaseException as exc:
+        raise LayerError(exc)
+    ldata.expandVarref('LAYERDIR')
+
+    collections = (ldata.getVar('BBFILE_COLLECTIONS', True) or '').split()
+    if not collections:
+        name = os.path.basename(layer_path)
+        collections = [name]
+
+    collections = {c: {} for c in collections}
+    for name in collections:
+        priority = ldata.getVar('BBFILE_PRIORITY_%s' % name, True)
+        pattern = ldata.getVar('BBFILE_PATTERN_%s' % name, True)
+        depends = ldata.getVar('LAYERDEPENDS_%s' % name, True)
+        collections[name]['priority'] = priority
+        collections[name]['pattern'] = pattern
+        collections[name]['depends'] = depends
+
+    return collections
+
+def _detect_layer(layer_path):
+    """
+        Scans a layer directory to detect what type of layer
+        it is: BSP, Distro or Software.
+
+        Returns a dictionary with layer name, type and path.
+    """
+
+    layer = {}
+    layer_name = os.path.basename(layer_path)
+
+    layer['name'] = layer_name
+    layer['path'] = layer_path
+    layer['conf'] = {}
+
+    if not os.path.isfile(os.path.join(layer_path, 'conf', 'layer.conf')):
+        layer['type'] = LayerType.ERROR_NO_LAYER_CONF
+        return layer
+
+    machine_conf = os.path.join(layer_path, 'conf', 'machine')
+    distro_conf = os.path.join(layer_path, 'conf', 'distro')
+
+    is_bsp = False
+    is_distro = False
+
+    if os.path.isdir(machine_conf):
+        machines = _get_configurations(machine_conf)
+        if machines:
+            is_bsp = True
+
+    if os.path.isdir(distro_conf):
+        distros = _get_configurations(distro_conf)
+        if distros:
+            is_distro = True
+
+    if is_bsp and is_distro:
+        layer['type'] = LayerType.ERROR_BSP_DISTRO
+    elif is_bsp:
+        layer['type'] = LayerType.BSP
+        layer['conf']['machines'] = machines
+    elif is_distro:
+        layer['type'] = LayerType.DISTRO
+        layer['conf']['distros'] = distros
+    else:
+        layer['type'] = LayerType.SOFTWARE
+
+    layer['collections'] = _get_layer_collections(layer['path'])
+
+    return layer
+
+def detect_layers(layer_directories, no_auto):
+    layers = []
+
+    for directory in layer_directories:
+        directory = os.path.realpath(directory)
+        if directory[-1] == '/':
+            directory = directory[0:-1]
+
+        if no_auto:
+            conf_dir = os.path.join(directory, 'conf')
+            if os.path.isdir(conf_dir):
+                layer = _detect_layer(directory)
+                if layer:
+                    layers.append(layer)
+        else:
+            for root, dirs, files in os.walk(directory):
+                dir_name = os.path.basename(root)
+                conf_dir = os.path.join(root, 'conf')
+                if os.path.isdir(conf_dir):
+                    layer = _detect_layer(root)
+                    if layer:
+                        layers.append(layer)
+
+    return layers
+
+def _find_layer_depends(depend, layers):
+    for layer in layers:
+        for collection in layer['collections']:
+            if depend == collection:
+                return layer
+    return None
+
+def add_layer_dependencies(bblayersconf, layer, layers, logger):
+    def recurse_dependencies(depends, layer, layers, logger, ret=None):
+        if ret is None:
+            ret = []
+        logger.debug('Processing dependencies %s for layer %s.' % \
+                    (depends, layer['name']))
+
+        for depend in depends.split():
+            # core (oe-core) is supposed to be provided
+            if depend == 'core':
+                continue
+
+            layer_depend = _find_layer_depends(depend, layers)
+            if not layer_depend:
+                logger.error('Layer %s depends on %s which wasn\'t found.' % \
+                        (layer['name'], depend))
+                ret = None
+                continue
+
+            # We keep processing, even if ret is None, this allows us to report
+            # multiple errors at once
+            if ret is not None and layer_depend not in ret:
+                ret.append(layer_depend)
+
+            # Recursively process...
+            if 'collections' not in layer_depend:
+                continue
+
+            for collection in layer_depend['collections']:
+                collect_deps = layer_depend['collections'][collection]['depends']
+                if not collect_deps:
+                    continue
+                ret = recurse_dependencies(collect_deps, layer_depend, layers, logger, ret)
+
+        return ret
+
+    layer_depends = []
+    for collection in layer['collections']:
+        depends = layer['collections'][collection]['depends']
+        if not depends:
+            continue
+
+        layer_depends = recurse_dependencies(depends, layer, layers, logger, layer_depends)
+
+    # Note: [] (empty) is allowed, None is not!
+    if layer_depends is None:
+        return False
+    else:
+        # Don't add a layer that is already present.
+        added = set()
+        output = check_command('Getting existing layers failed.', 'bitbake-layers show-layers').decode('utf-8')
+        for _, path, _ in re.findall(r'^(\S+) +([^\n]*?) +(\d+)$', output, re.MULTILINE):
+            added.add(path)
+
+        for layer_depend in layer_depends:
+            name = layer_depend['name']
+            path = layer_depend['path']
+            if path in added:
+                continue
+            else:
+                added.add(path)
+            logger.info('Adding layer dependency %s' % name)
+            with open(bblayersconf, 'a+') as f:
+                f.write("\nBBLAYERS += \"%s\"\n" % path)
+    return True
+
+def add_layer(bblayersconf, layer, layers, logger):
+    logger.info('Adding layer %s' % layer['name'])
+    with open(bblayersconf, 'a+') as f:
+        f.write("\nBBLAYERS += \"%s\"\n" % layer['path'])
+
+    return True
+
+def check_command(error_msg, cmd):
+    '''
+    Run a command under a shell, capture stdout and stderr in a single stream,
+    throw an error when command returns non-zero exit code. Returns the output.
+    '''
+
+    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    output, _ = p.communicate()
+    if p.returncode:
+        msg = "%s\nCommand: %s\nOutput:\n%s" % (error_msg, cmd, output.decode('utf-8'))
+        raise RuntimeError(msg)
+    return output
+
+def get_signatures(builddir, failsafe=False, machine=None):
+    import re
+
+    # Some recipes need to be excluded, like meta-world-pkgdata,
+    # because a layer can add recipes to a world build and the
+    # signatures would then change
+    exclude_recipes = ('meta-world-pkgdata',)
+
+    sigs = {}
+    tune2tasks = {}
+
+    cmd = ''
+    if machine:
+        cmd += 'MACHINE=%s ' % machine
+    cmd += 'bitbake '
+    if failsafe:
+        cmd += '-k '
+    cmd += '-S none world'
+    sigs_file = os.path.join(builddir, 'locked-sigs.inc')
+    if os.path.exists(sigs_file):
+        os.unlink(sigs_file)
+    try:
+        check_command('Generating signatures failed. This might be due to some parse error and/or general layer incompatibilities.',
+                      cmd)
+    except RuntimeError as ex:
+        if failsafe and os.path.exists(sigs_file):
+            # Ignore the error here. Most likely some recipes active
+            # in a world build lack some dependencies. There is a
+            # separate test_machine_world_build which exposes the
+            # failure.
+            pass
+        else:
+            raise
+
+    sig_regex = re.compile("^(?P<task>.*:.*):(?P<hash>.*) .$")
+    tune_regex = re.compile(r"(^|\s)SIGGEN_LOCKEDSIGS_t-(?P<tune>\S*)\s*=\s*")
+    current_tune = None
+    with open(sigs_file, 'r') as f:
+        for line in f.readlines():
+            line = line.strip()
+            t = tune_regex.search(line)
+            if t:
+                current_tune = t.group('tune')
+            s = sig_regex.match(line)
+            if s:
+                exclude = False
+                for er in exclude_recipes:
+                    (recipe, task) = s.group('task').split(':')
+                    if er == recipe:
+                        exclude = True
+                        break
+                if exclude:
+                    continue
+
+                sigs[s.group('task')] = s.group('hash')
+                tune2tasks.setdefault(current_tune, []).append(s.group('task'))
+
+    if not sigs:
+        raise RuntimeError('Can\'t load signatures from %s' % sigs_file)
+
+    return (sigs, tune2tasks)
+
+def get_depgraph(targets=['world'], failsafe=False):
+    '''
+    Returns the dependency graph for the given target(s).
+    The dependency graph is taken directly from DepTreeEvent.
+    '''
+    depgraph = None
+    with bb.tinfoil.Tinfoil() as tinfoil:
+        tinfoil.prepare(config_only=False)
+        tinfoil.set_event_mask(['bb.event.NoProvider', 'bb.event.DepTreeGenerated', 'bb.command.CommandCompleted'])
+        if not tinfoil.run_command('generateDepTreeEvent', targets, 'do_build'):
+            raise RuntimeError('starting generateDepTreeEvent failed')
+        while True:
+            event = tinfoil.wait_event(timeout=1000)
+            if event:
+                if isinstance(event, bb.command.CommandFailed):
+                    raise RuntimeError('Generating dependency information failed: %s' % event.error)
+                elif isinstance(event, bb.command.CommandCompleted):
+                    break
+                elif isinstance(event, bb.event.NoProvider):
+                    if failsafe:
+                        # The event is informational, we will get information about the
+                        # remaining dependencies eventually and thus can ignore this
+                        # here like we do in get_signatures(), if desired.
+                        continue
+                    if event._reasons:
+                        raise RuntimeError('Nothing provides %s: %s' % (event._item, event._reasons))
+                    else:
+                        raise RuntimeError('Nothing provides %s.' % (event._item))
+                elif isinstance(event, bb.event.DepTreeGenerated):
+                    depgraph = event._depgraph
+
+    if depgraph is None:
+        raise RuntimeError('Could not retrieve the depgraph.')
+    return depgraph
+
+def compare_signatures(old_sigs, curr_sigs):
+    '''
+    Compares the result of two get_signatures() calls. Returns None if no
+    problems found, otherwise a string that can be used as additional
+    explanation in self.fail().
+    '''
+    # task -> (old signature, new signature)
+    sig_diff = {}
+    for task in old_sigs:
+        if task in curr_sigs and \
+           old_sigs[task] != curr_sigs[task]:
+            sig_diff[task] = (old_sigs[task], curr_sigs[task])
+
+    if not sig_diff:
+        return None
+
+    # Beware, depgraph uses task=<pn>.<taskname> whereas get_signatures()
+    # uses <pn>:<taskname>. Need to convert sometimes. The output follows
+    # the convention from get_signatures() because that seems closer to
+    # normal bitbake output.
+    def sig2graph(task):
+        pn, taskname = task.rsplit(':', 1)
+        return pn + '.' + taskname
+    def graph2sig(task):
+        pn, taskname = task.rsplit('.', 1)
+        return pn + ':' + taskname
+    depgraph = get_depgraph(failsafe=True)
+    depends = depgraph['tdepends']
+
+    # If a task A has a changed signature, but none of its
+    # dependencies, then we need to report it because it is
+    # the one which introduces a change. Any task depending on
+    # A (directly or indirectly) will also have a changed
+    # signature, but we don't need to report it. It might have
+    # its own changes, which will become apparent once the
+    # issues that we do report are fixed and the test gets run
+    # again.
+    sig_diff_filtered = []
+    for task, (old_sig, new_sig) in sig_diff.items():
+        deps_tainted = False
+        for dep in depends.get(sig2graph(task), ()):
+            if graph2sig(dep) in sig_diff:
+                deps_tainted = True
+                break
+        if not deps_tainted:
+            sig_diff_filtered.append((task, old_sig, new_sig))
+
+    msg = []
+    msg.append('%d signatures changed, initial differences (first hash before, second after):' %
+               len(sig_diff))
+    for diff in sorted(sig_diff_filtered):
+        recipe, taskname = diff[0].rsplit(':', 1)
+        cmd = 'bitbake-diffsigs --task %s %s --signature %s %s' % \
+              (recipe, taskname, diff[1], diff[2])
+        msg.append('   %s: %s -> %s' % diff)
+        msg.append('      %s' % cmd)
+        try:
+            output = check_command('Determining signature difference failed.',
+                                   cmd).decode('utf-8')
+        except RuntimeError as error:
+            output = str(error)
+        if output:
+            msg.extend(['      ' + line for line in output.splitlines()])
+            msg.append('')
+    return '\n'.join(msg)
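
Putting the helpers above together, a minimal sketch of the intended flow (the paths are made up, and it assumes a build environment has already been initialized so that bitbake and tinfoil are usable):

    from compatlayer import detect_layers, get_signatures, compare_signatures

    layers = detect_layers(['/path/to/meta-example'], no_auto=False)
    base_sigs, _ = get_signatures('/path/to/build')
    # ... append the layer under test to bblayers.conf (see add_layer()), then:
    curr_sigs, _ = get_signatures('/path/to/build', failsafe=True)
    msg = compare_signatures(base_sigs, curr_sigs)
    if msg:
        print(msg)  # lists the tasks whose signatures changed, with diffs
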
diff --git a/import-layers/yocto-poky/scripts/lib/compatlayer/case.py b/import-layers/yocto-poky/scripts/lib/compatlayer/case.py
new file mode 100644
index 0000000..54ce78a
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/compatlayer/case.py
@@ -0,0 +1,7 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+from oeqa.core.case import OETestCase
+
+class OECompatLayerTestCase(OETestCase):
+    pass
diff --git a/import-layers/yocto-poky/scripts/lib/wic/imager/__init__.py b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/__init__.py
similarity index 100%
rename from import-layers/yocto-poky/scripts/lib/wic/imager/__init__.py
rename to import-layers/yocto-poky/scripts/lib/compatlayer/cases/__init__.py
diff --git a/import-layers/yocto-poky/scripts/lib/compatlayer/cases/bsp.py b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/bsp.py
new file mode 100644
index 0000000..43efae4
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/bsp.py
@@ -0,0 +1,204 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+
+from compatlayer import LayerType, get_signatures, check_command, get_depgraph
+from compatlayer.case import OECompatLayerTestCase
+
+class BSPCompatLayer(OECompatLayerTestCase):
+    @classmethod
+    def setUpClass(self):
+        if self.tc.layer['type'] != LayerType.BSP:
+            raise unittest.SkipTest("BSPCompatLayer: Layer %s isn't BSP one." %\
+                self.tc.layer['name'])
+
+    def test_bsp_defines_machines(self):
+        self.assertTrue(self.tc.layer['conf']['machines'],
+                "Layer is BSP but doesn't define machines.")
+
+    def test_bsp_no_set_machine(self):
+        from oeqa.utils.commands import get_bb_var
+
+        machine = get_bb_var('MACHINE')
+        self.assertEqual(self.td['bbvars']['MACHINE'], machine,
+                msg="Layer %s modified machine %s -> %s" % \
+                    (self.tc.layer['name'], self.td['bbvars']['MACHINE'], machine))
+
+
+    def test_machine_world(self):
+        '''
+        "bitbake world" is expected to work regardless which machine is selected.
+        BSP layers sometimes break that by enabling a recipe for a certain machine
+        without checking whether that recipe actually can be built in the current
+        distro configuration (for example, OpenGL might not be enabled).
+
+        This test iterates over all machines. It would be nicer to instantiate
+        it once per machine. It merely checks for errors during parse
+        time. It does not actually attempt to build anything.
+        '''
+
+        if not self.td['machines']:
+            self.skipTest('No machines set with --machines.')
+        msg = []
+        for machine in self.td['machines']:
+            # In contrast to test_machine_signatures() below, errors are fatal here.
+            try:
+                get_signatures(self.td['builddir'], failsafe=False, machine=machine)
+            except RuntimeError as ex:
+                msg.append(str(ex))
+        if msg:
+            msg.insert(0, 'The following machines broke a world build:')
+            self.fail('\n'.join(msg))
+
+    def test_machine_signatures(self):
+        '''
+        Selecting a machine may only affect the signature of tasks that are specific
+        to that machine. In other words, when MACHINE=A and MACHINE=B share a recipe
+        foo and the output of foo, then both machine configurations must build foo
+        in exactly the same way. Otherwise it is not possible to use both machines
+        in the same distribution.
+
+        This criterion can only be tested by testing different machines in combination,
+        i.e. one main layer, potentially several additional BSP layers and an explicit
+        choice of machines:
+        yocto-compat-layer --additional-layers .../meta-intel --machines intel-corei7-64 imx6slevk -- .../meta-freescale
+        '''
+
+        if not self.td['machines']:
+            self.skipTest('No machines set with --machines.')
+
+        # Collect signatures for all machines that we are testing
+        # and merge that into a hash:
+        # tune -> task -> signature -> list of machines with that combination
+        #
+        # It is an error if any tune/task pair has more than one signature,
+        # because that implies that the machines that caused those different
+        # signatures do not agree on how to execute the task.
+        tunes = {}
+        # Preserve ordering of machines as chosen by the user.
+        for machine in self.td['machines']:
+            curr_sigs, tune2tasks = get_signatures(self.td['builddir'], failsafe=True, machine=machine)
+            # Invert the tune -> [tasks] mapping.
+            tasks2tune = {}
+            for tune, tasks in tune2tasks.items():
+                for task in tasks:
+                    tasks2tune[task] = tune
+            for task, sighash in curr_sigs.items():
+                tunes.setdefault(tasks2tune[task], {}).setdefault(task, {}).setdefault(sighash, []).append(machine)
+
+        msg = []
+        pruned = 0
+        last_line_key = None
+        # do_fetch, do_unpack, ..., do_build
+        taskname_list = []
+        if tunes:
+            # The output below is most useful when we start with tasks that are at
+            # the bottom of the dependency chain, i.e. those that run first. If
+            # those tasks differ, the rest also does.
+            #
+            # To get an ordering of tasks, we do a topological sort of the entire
+            # depgraph for the base configuration, then on-the-fly flatten that list by stripping
+            # out the recipe names and removing duplicates. The base configuration
+            # is not necessarily representative, but should be close enough. Tasks
+            # that were not encountered get a default priority.
+            depgraph = get_depgraph()
+            depends = depgraph['tdepends']
+            WHITE = 1
+            GRAY = 2
+            BLACK = 3
+            color = {}
+            found = set()
+            def visit(task):
+                color[task] = GRAY
+                for dep in depends.get(task, ()):
+                    if color.setdefault(dep, WHITE) == WHITE:
+                        visit(dep)
+                color[task] = BLACK
+                pn, taskname = task.rsplit('.', 1)
+                if taskname not in found:
+                    taskname_list.append(taskname)
+                    found.add(taskname)
+            for task in depends.keys():
+                if color.setdefault(task, WHITE) == WHITE:
+                    visit(task)
+
+        taskname_order = {task: index for index, task in enumerate(taskname_list)}
+        def task_key(task):
+            pn, taskname = task.rsplit(':', 1)
+            return (pn, taskname_order.get(taskname, len(taskname_list)), taskname)
+
+        for tune in sorted(tunes.keys()):
+            tasks = tunes[tune]
+            # As for test_signatures it would be nicer to sort tasks
+            # by dependencies here, but that is harder because we have
+            # to report on tasks from different machines, which might
+            # have different dependencies. We resort to pruning the
+            # output by reporting only one task per recipe if the set
+            # of machines matches.
+            #
+            # "bitbake-diffsigs -t -s" is intelligent enough to print
+            # diffs recursively, so often it does not matter that much
+            # if we don't pick the underlying difference
+            # here. However, sometimes recursion fails
+            # (https://bugzilla.yoctoproject.org/show_bug.cgi?id=6428).
+            #
+            # To mitigate that a bit, we use a hard-coded ordering of
+            # tasks that represents how they normally run and prefer
+            # to print the ones that run first.
+            for task in sorted(tasks.keys(), key=task_key):
+                signatures = tasks[task]
+                # do_build can be ignored: it is known to have
+                # different signatures in some cases, for example in
+                # the allarch ca-certificates due to RDEPENDS=openssl.
+                # That particular dependency is whitelisted via
+                # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
+                # in the sstate signature hash because filtering it
+                # out would be hard and running do_build multiple
+                # times doesn't really matter.
+                if len(signatures.keys()) > 1 and \
+                   not task.endswith(':do_build'):
+                    # Error!
+                    #
+                    # Sort signatures by machines, because the hex values don't mean anything.
+                    # => all-arch adwaita-icon-theme:do_build: 1234... (beaglebone, qemux86) != abcdf... (qemux86-64)
+                    #
+                    # Skip the line if it is covered already by the predecessor (same pn, same sets of machines).
+                    pn, taskname = task.rsplit(':', 1)
+                    next_line_key = (pn, sorted(signatures.values()))
+                    if next_line_key != last_line_key:
+                        line = '   %s %s: ' % (tune, task)
+                        line += ' != '.join(['%s (%s)' % (signature, ', '.join([m for m in signatures[signature]])) for
+                                             signature in sorted(signatures.keys(), key=lambda s: signatures[s])])
+                        last_line_key = next_line_key
+                        msg.append(line)
+                        # Randomly pick two mismatched signatures and remember how to invoke
+                        # bitbake-diffsigs for them.
+                        iterator = iter(signatures.items())
+                        a = next(iterator)
+                        b = next(iterator)
+                        diffsig_machines = '(%s) != (%s)' % (', '.join(a[1]), ', '.join(b[1]))
+                        diffsig_params = '-t %s %s -s %s %s' % (pn, taskname, a[0], b[0])
+                    else:
+                        pruned += 1
+
+        if msg:
+            msg.insert(0, 'The machines have conflicting signatures for some shared tasks:')
+            if pruned > 0:
+                msg.append('')
+                msg.append('%d tasks were not listed because some other task of the recipe already differed.' % pruned)
+                msg.append('It is likely that differences from different recipes also have the same root cause.')
+            msg.append('')
+            # Explain how to investigate...
+            msg.append('To investigate, run bitbake-diffsigs -t recipename taskname -s fromsig tosig.')
+            cmd = 'bitbake-diffsigs %s' % diffsig_params
+            msg.append('Example: %s in the last line' % diffsig_machines)
+            msg.append('Command: %s' % cmd)
+            # ... and actually do it automatically for that example, but without aborting
+            # when that fails.
+            try:
+                output = check_command('Comparing signatures failed.', cmd).decode('utf-8')
+            except RuntimeError as ex:
+                output = str(ex)
+            msg.extend(['   ' + line for line in output.splitlines()])
+            self.fail('\n'.join(msg))
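
The WHITE/GRAY/BLACK walk above is a plain depth-first topological sort; a self-contained sketch of the same idea, run against an invented toy depgraph:

    WHITE, GRAY, BLACK = 1, 2, 3

    def topo_tasknames(depends):
        """Return task names ordered so that dependencies come first."""
        color, found, order = {}, set(), []
        def visit(task):
            color[task] = GRAY
            for dep in depends.get(task, ()):
                if color.setdefault(dep, WHITE) == WHITE:
                    visit(dep)
            color[task] = BLACK
            taskname = task.rsplit('.', 1)[1]
            if taskname not in found:
                order.append(taskname)
                found.add(taskname)
        for task in depends:
            if color.setdefault(task, WHITE) == WHITE:
                visit(task)
        return order

    print(topo_tasknames({'foo.do_build': ['foo.do_compile'],
                          'foo.do_compile': ['foo.do_fetch']}))
    # -> ['do_fetch', 'do_compile', 'do_build']
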
diff --git a/import-layers/yocto-poky/scripts/lib/compatlayer/cases/common.py b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/common.py
new file mode 100644
index 0000000..55e8ba4
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/common.py
@@ -0,0 +1,53 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import glob
+import os
+import unittest
+from compatlayer import get_signatures, LayerType, check_command, get_depgraph, compare_signatures
+from compatlayer.case import OECompatLayerTestCase
+
+class CommonCompatLayer(OECompatLayerTestCase):
+    def test_readme(self):
+        # The top-level README file may have a suffix (like README.rst or README.txt).
+        readme_files = glob.glob(os.path.join(self.tc.layer['path'], 'README*'))
+        self.assertTrue(len(readme_files) > 0,
+                        msg="Layer doesn't contain a README file.")
+
+        # There might be more than one file matching the file pattern above
+        # (for example, README.rst and README-COPYING.rst). The one with the shortest
+        # name is considered the "main" one.
+        readme_file = sorted(readme_files)[0]
+        data = ''
+        with open(readme_file, 'r') as f:
+            data = f.read()
+        self.assertTrue(data,
+                msg="Layer contains a README file but it is empty.")
+
+    def test_parse(self):
+        check_command('Layer %s failed to parse.' % self.tc.layer['name'],
+                      'bitbake -p')
+
+    def test_show_environment(self):
+        check_command('Layer %s failed to show environment.' % self.tc.layer['name'],
+                      'bitbake -e')
+
+    def test_world(self):
+        '''
+        "bitbake world" is expected to work. test_signatures does not cover that
+        because it is more lenient and ignores recipes in a world build that
+        are not actually buildable, so here we fail when "bitbake -S none world"
+        fails.
+        '''
+        get_signatures(self.td['builddir'], failsafe=False)
+
+    def test_signatures(self):
+        if self.tc.layer['type'] == LayerType.SOFTWARE and \
+           not self.tc.test_software_layer_signatures:
+            raise unittest.SkipTest("Not testing for signature changes in a software layer %s." \
+                     % self.tc.layer['name'])
+
+        curr_sigs, _ = get_signatures(self.td['builddir'], failsafe=True)
+        msg = compare_signatures(self.td['sigs'], curr_sigs)
+        if msg is not None:
+            self.fail('Adding layer %s changed signatures.\n%s' % (self.tc.layer['name'], msg))
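
The README discovery in test_readme() boils down to a glob plus an alphabetical sort; a tiny sketch with an invented layer path:

    import glob
    import os

    layer_path = '/path/to/meta-example'  # hypothetical
    readmes = sorted(glob.glob(os.path.join(layer_path, 'README*')))
    # 'README' sorts before 'README.rst', so a plain README wins if present
    main_readme = readmes[0] if readmes else None
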
diff --git a/import-layers/yocto-poky/scripts/lib/compatlayer/cases/distro.py b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/distro.py
new file mode 100644
index 0000000..523acc1
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/compatlayer/cases/distro.py
@@ -0,0 +1,26 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import unittest
+
+from compatlayer import LayerType
+from compatlayer.case import OECompatLayerTestCase
+
+class DistroCompatLayer(OECompatLayerTestCase):
+    @classmethod
+    def setUpClass(self):
+        if self.tc.layer['type'] != LayerType.DISTRO:
+            raise unittest.SkipTest("DistroCompatLayer: Layer %s isn't Distro one." %\
+                self.tc.layer['name'])
+
+    def test_distro_defines_distros(self):
+        self.assertTrue(self.tc.layer['conf']['distros'],
+                "Layer is Distro but doesn't define distros.")
+
+    def test_distro_no_set_distros(self):
+        from oeqa.utils.commands import get_bb_var
+
+        distro = get_bb_var('DISTRO')
+        self.assertEqual(self.td['bbvars']['DISTRO'], distro,
+                msg="Layer %s modified distro %s -> %s" % \
+                    (self.tc.layer['name'], self.td['bbvars']['DISTRO'], distro))
diff --git a/import-layers/yocto-poky/scripts/lib/compatlayer/context.py b/import-layers/yocto-poky/scripts/lib/compatlayer/context.py
new file mode 100644
index 0000000..7811d4a
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/compatlayer/context.py
@@ -0,0 +1,15 @@
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import glob
+import re
+
+from oeqa.core.context import OETestContext
+
+class CompatLayerTestContext(OETestContext):
+    def __init__(self, td=None, logger=None, layer=None, test_software_layer_signatures=True):
+        super(CompatLayerTestContext, self).__init__(td, logger)
+        self.layer = layer
+        self.test_software_layer_signatures = test_software_layer_signatures
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/__init__.py b/import-layers/yocto-poky/scripts/lib/devtool/__init__.py
index e675133..d646b0c 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/__init__.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/__init__.py
@@ -23,6 +23,7 @@
 import subprocess
 import logging
 import re
+import codecs
 
 logger = logging.getLogger('devtool')
 
@@ -67,10 +68,10 @@
         cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **options
     )
 
+    reader = codecs.getreader('utf-8')(process.stdout)
     buf = ''
     while True:
-        out = process.stdout.read(1)
-        out = out.decode('utf-8')
+        out = reader.read(1, 1)
         if out:
             sys.stdout.write(out)
             sys.stdout.flush()
@@ -86,13 +87,13 @@
 def exec_fakeroot(d, cmd, **kwargs):
     """Run a command under fakeroot (pseudo, in fact) so that it picks up the appropriate file permissions"""
     # Grab the command and check it actually exists
-    fakerootcmd = d.getVar('FAKEROOTCMD', True)
+    fakerootcmd = d.getVar('FAKEROOTCMD')
     if not os.path.exists(fakerootcmd):
         logger.error('pseudo executable %s could not be found - have you run a build yet? pseudo-native should install this, and if you have run any build then it should have been built' % fakerootcmd)
         return 2
     # Set up the appropriate environment
     newenv = dict(os.environ)
-    fakerootenv = d.getVar('FAKEROOTENV', True)
+    fakerootenv = d.getVar('FAKEROOTENV')
     for varvalue in fakerootenv.split():
         if '=' in varvalue:
             splitval = varvalue.split('=', 1)
@@ -113,40 +114,40 @@
 
         import bb.tinfoil
         tinfoil = bb.tinfoil.Tinfoil(tracking=tracking)
-        tinfoil.prepare(config_only)
-        tinfoil.logger.setLevel(logger.getEffectiveLevel())
+        try:
+            tinfoil.prepare(config_only)
+            tinfoil.logger.setLevel(logger.getEffectiveLevel())
+        except bb.tinfoil.TinfoilUIException:
+            tinfoil.shutdown()
+            raise DevtoolError('Failed to start bitbake environment')
+        except:
+            tinfoil.shutdown()
+            raise
     finally:
         os.chdir(orig_cwd)
     return tinfoil
 
-def get_recipe_file(cooker, pn):
-    """Find recipe file corresponding a package name"""
-    import oe.recipeutils
-    recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
-    if not recipefile:
-        skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
-        if skipreasons:
-            logger.error('\n'.join(skipreasons))
-        else:
-            logger.error("Unable to find any recipe file matching %s" % pn)
-    return recipefile
-
 def parse_recipe(config, tinfoil, pn, appends, filter_workspace=True):
-    """Parse recipe of a package"""
-    import oe.recipeutils
-    recipefile = get_recipe_file(tinfoil.cooker, pn)
-    if not recipefile:
-        # Error already logged
+    """Parse the specified recipe"""
+    try:
+        recipefile = tinfoil.get_recipe_file(pn)
+    except bb.providers.NoProvider as e:
+        logger.error(str(e))
         return None
     if appends:
-        append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
+        append_files = tinfoil.get_file_appends(recipefile)
         if filter_workspace:
             # Filter out appends from the workspace
             append_files = [path for path in append_files if
                             not path.startswith(config.workspace_path)]
     else:
         append_files = None
-    return oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, append_files)
+    try:
+        rd = tinfoil.parse_recipe_file(recipefile, appends, append_files)
+    except Exception as e:
+        logger.error(str(e))
+        return None
+    return rd
 
 def check_workspace_recipe(workspace, pn, checksrc=True, bbclassextend=False):
     """
@@ -190,7 +191,7 @@
         logger.info('Using source tree as build directory since --same-dir specified')
     elif bb.data.inherits_class('autotools-brokensep', d):
         logger.info('Using source tree as build directory since recipe inherits autotools-brokensep')
-    elif d.getVar('B', True) == os.path.abspath(d.getVar('S', True)):
+    elif d.getVar('B') == os.path.abspath(d.getVar('S')):
         logger.info('Using source tree as build directory since that would be the default for this recipe')
     else:
         b_is_s = False
@@ -260,23 +261,28 @@
                 targets.append('%s-%s' % (pn, variant))
     return targets
 
-def ensure_npm(config, basepath, fixed_setup=False):
+def ensure_npm(config, basepath, fixed_setup=False, check_exists=True):
     """
     Ensure that npm is available and either build it or show a
     reasonable error message
     """
-    tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
-    try:
-        nativepath = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
-    finally:
-        tinfoil.shutdown()
+    if check_exists:
+        tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
+        try:
+            rd = tinfoil.parse_recipe('nodejs-native')
+            nativepath = rd.getVar('STAGING_BINDIR_NATIVE')
+        finally:
+            tinfoil.shutdown()
+        npmpath = os.path.join(nativepath, 'npm')
+        build_npm = not os.path.exists(npmpath)
+    else:
+        build_npm = True
 
-    npmpath = os.path.join(nativepath, 'npm')
-    if not os.path.exists(npmpath):
+    if build_npm:
         logger.info('Building nodejs-native')
         try:
             exec_build_env_command(config.init_path, basepath,
-                                'bitbake -q nodejs-native', watch=True)
+                                'bitbake -q nodejs-native -c addto_recipe_sysroot', watch=True)
         except bb.process.ExecutionError as e:
             if "Nothing PROVIDES 'nodejs-native'" in e.stdout:
                 if fixed_setup:
@@ -286,5 +292,3 @@
                 raise DevtoolError(msg)
             else:
                 raise
-        if not os.path.exists(npmpath):
-            raise DevtoolError('Built nodejs-native but npm binary still could not be found at %s' % npmpath)
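
The rewritten parse_recipe() above leans on the newer tinfoil API rather than reaching into the cooker directly; a minimal stand-alone sketch of the same calls (the recipe name is arbitrary):

    import bb.tinfoil

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=False)
        recipefile = tinfoil.get_recipe_file('busybox')
        appendfiles = tinfoil.get_file_appends(recipefile)
        rd = tinfoil.parse_recipe_file(recipefile, appends=True,
                                       appendlist=appendfiles)
        print(rd.getVar('PV'))
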
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/build.py b/import-layers/yocto-poky/scripts/lib/devtool/build.py
index 6be549d..252379e 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/build.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/build.py
@@ -80,7 +80,7 @@
     """Register devtool subcommands from this plugin"""
     parser_build = subparsers.add_parser('build', help='Build a recipe',
                                          description='Builds the specified recipe using bitbake (up to and including %s)' % ', '.join(_get_build_tasks(context.config)),
-                                         group='working')
+                                         group='working', order=50)
     parser_build.add_argument('recipename', help='Recipe to build')
     parser_build.add_argument('-s', '--disable-parallel-make', action="store_true", help='Disable make parallelism')
     parser_build.set_defaults(func=build)
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/build_image.py b/import-layers/yocto-poky/scripts/lib/devtool/build_image.py
index ae75511..e581038 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/build_image.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/build_image.py
@@ -34,8 +34,8 @@
     result = []
     for recipe in workspace:
         data = parse_recipe(config, tinfoil, recipe, True)
-        if 'class-target' in data.getVar('OVERRIDES', True).split(':'):
-            if recipe in data.getVar('PACKAGES', True).split():
+        if 'class-target' in data.getVar('OVERRIDES').split(':'):
+            if recipe in data.getVar('PACKAGES').split():
                 result.append(recipe)
             else:
                 logger.warning("Skipping recipe %s as it doesn't produce a "
@@ -95,7 +95,7 @@
             raise TargetNotImageError()
 
         # Get the actual filename used and strip the .bb and full path
-        target_basename = rd.getVar('FILE', True)
+        target_basename = rd.getVar('FILE')
         target_basename = os.path.splitext(os.path.basename(target_basename))[0]
         config.set('SDK', 'target_basename', target_basename)
         config.write()
@@ -132,9 +132,9 @@
                             afile.write('%s\n' % line)
 
             if task in ['populate_sdk', 'populate_sdk_ext']:
-                outputdir = rd.getVar('SDK_DEPLOY', True)
+                outputdir = rd.getVar('SDK_DEPLOY')
             else:
-                outputdir = rd.getVar('DEPLOY_DIR_IMAGE', True)
+                outputdir = rd.getVar('DEPLOY_DIR_IMAGE')
 
             tmp_tinfoil = tinfoil
             tinfoil = None
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/deploy.py b/import-layers/yocto-poky/scripts/lib/devtool/deploy.py
index c4c7bf6..b3730ae 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/deploy.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/deploy.py
@@ -156,11 +156,11 @@
     tinfoil = setup_tinfoil(basepath=basepath)
     try:
         try:
-            rd = oe.recipeutils.parse_recipe_simple(tinfoil.cooker, args.recipename, tinfoil.config_data)
+            rd = tinfoil.parse_recipe(args.recipename)
         except Exception as e:
             raise DevtoolError('Exception parsing recipe %s: %s' %
                             (args.recipename, e))
-        recipe_outdir = rd.getVar('D', True)
+        recipe_outdir = rd.getVar('D')
         if not os.path.exists(recipe_outdir) or not os.listdir(recipe_outdir):
             raise DevtoolError('No files to deploy - have you built the %s '
                             'recipe? If so, the install step has not installed '
@@ -192,6 +192,14 @@
         if not args.show_status:
             extraoptions += ' -q'
 
+        scp_port = ''
+        ssh_port = ''
+        if not args.port:
+            raise DevtoolError("If you specify -P/--port then you must provide the port to be used to connect to the target")
+        else:
+            scp_port = "-P %s" % args.port
+            ssh_port = "-p %s" % args.port
+
         # In order to delete previously deployed files and have the manifest file on
         # the target, we write out a shell script and then copy it to the target
         # so we can then run it (piping tar output to it).
@@ -213,7 +221,7 @@
                 for fpath, fsize in filelist:
                     f.write('%s %d\n' % (fpath, fsize))
             # Copy them to the target
-            ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+            ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
             if ret != 0:
                 raise DevtoolError('Failed to copy script to %s - rerun with -s to '
                                 'get a complete error message' % args.target)
@@ -221,7 +229,7 @@
             shutil.rmtree(tmpdir)
 
         # Now run the script
-        ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s \'sh %s %s %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
+        ret = exec_fakeroot(rd, 'tar cf - . | ssh %s %s %s \'sh %s %s %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename, destdir, tmpfilelist), cwd=recipe_outdir, shell=True)
         if ret != 0:
             raise DevtoolError('Deploy failed - rerun with -s to get a complete '
                             'error message')
@@ -251,6 +259,14 @@
     if not args.show_status:
         extraoptions += ' -q'
 
+    scp_port = ''
+    ssh_port = ''
+    if not args.port:
+        raise DevtoolError("If you specify -P/--port then you must provide the port to be used to connect to the target")
+    else:
+        scp_port = "-P %s" % args.port
+        ssh_port = "-p %s" % args.port
+
     args.target = args.target.split(':')[0]
 
     tmpdir = tempfile.mkdtemp(prefix='devtool')
@@ -261,7 +277,7 @@
         with open(os.path.join(tmpdir, os.path.basename(tmpscript)), 'w') as f:
             f.write(shellscript)
         # Copy it to the target
-        ret = subprocess.call("scp %s %s/* %s:%s" % (extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
+        ret = subprocess.call("scp %s %s %s/* %s:%s" % (scp_port, extraoptions, tmpdir, args.target, os.path.dirname(tmpscript)), shell=True)
         if ret != 0:
             raise DevtoolError('Failed to copy script to %s - rerun with -s to '
                                 'get a complete error message' % args.target)
@@ -269,7 +285,7 @@
         shutil.rmtree(tmpdir)
 
     # Now run the script
-    ret = subprocess.call('ssh %s %s \'sh %s %s\'' % (extraoptions, args.target, tmpscript, args.recipename), shell=True)
+    ret = subprocess.call('ssh %s %s %s \'sh %s %s\'' % (ssh_port, extraoptions, args.target, tmpscript, args.recipename), shell=True)
     if ret != 0:
         raise DevtoolError('Undeploy failed - rerun with -s to get a complete '
                            'error message')
@@ -292,6 +308,7 @@
     parser_deploy.add_argument('-n', '--dry-run', help='List files to be deployed only', action='store_true')
     parser_deploy.add_argument('-p', '--no-preserve', help='Do not preserve existing files', action='store_true')
     parser_deploy.add_argument('--no-check-space', help='Do not check for available space before deploying', action='store_true')
+    parser_deploy.add_argument('-P', '--port', default='22', help='Port to use for connection to the target')
     parser_deploy.set_defaults(func=deploy)
 
     parser_undeploy = subparsers.add_parser('undeploy-target',
@@ -304,4 +321,5 @@
     parser_undeploy.add_argument('-s', '--show-status', help='Show progress/status output', action='store_true')
     parser_undeploy.add_argument('-a', '--all', help='Undeploy all recipes deployed on the target', action='store_true')
     parser_undeploy.add_argument('-n', '--dry-run', help='List files to be undeployed only', action='store_true')
+    parser_undeploy.add_argument('-P', '--port', default='22', help='Port to use for connection to the target')
     parser_undeploy.set_defaults(func=undeploy)
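A small aside on the port plumbing added in the deploy.py hunks above: scp and ssh spell the same option differently (scp uses -P, ssh uses -p), which is why the patch renders args.port twice. A hedged sketch with a hypothetical port value:

```python
# scp spells the port flag -P (capital); ssh spells it -p (lowercase).
port = '2222'  # hypothetical value for args.port (the parser default is '22')
scp_port = '-P %s' % port
ssh_port = '-p %s' % port

print('scp %s devtool_deploy.sh root@target:/tmp' % scp_port)     # scp -P 2222 ...
print('ssh %s root@target sh /tmp/devtool_deploy.sh' % ssh_port)  # ssh -p 2222 ...
```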
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/package.py b/import-layers/yocto-poky/scripts/lib/devtool/package.py
index afb5809..af9e8f1 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/package.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/package.py
@@ -28,15 +28,13 @@
     """Entry point for the devtool 'package' subcommand"""
     check_workspace_recipe(workspace, args.recipename)
 
-    tinfoil = setup_tinfoil(basepath=basepath)
+    tinfoil = setup_tinfoil(basepath=basepath, config_only=True)
     try:
-        tinfoil.prepare(config_only=True)
-
         image_pkgtype = config.get('Package', 'image_pkgtype', '')
         if not image_pkgtype:
-            image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE', True)
+            image_pkgtype = tinfoil.config_data.getVar('IMAGE_PKGTYPE')
 
-        deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper(), True)
+        deploy_dir_pkg = tinfoil.config_data.getVar('DEPLOY_DIR_%s' % image_pkgtype.upper())
     finally:
         tinfoil.shutdown()
 
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py b/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py
index ae25cee..e26cf28 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/runqemu.py
@@ -31,8 +31,10 @@
 
     tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
     try:
-        machine = tinfoil.config_data.getVar('MACHINE', True)
-        bindir_native = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE', True)
+        machine = tinfoil.config_data.getVar('MACHINE')
+        bindir_native = os.path.join(tinfoil.config_data.getVar('STAGING_DIR'),
+                                     tinfoil.config_data.getVar('BUILD_ARCH'),
+                                     tinfoil.config_data.getVar('bindir_native').lstrip(os.path.sep))
     finally:
         tinfoil.shutdown()
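The new bindir_native computation above leans on a Python quirk worth spelling out: os.path.join() discards every component before an absolute one, so the leading separator has to be stripped from ${bindir_native} before joining. A runnable illustration with hypothetical variable values:

```python
import os

staging = '/build/tmp/sysroots'  # hypothetical STAGING_DIR value
arch = 'x86_64-linux'            # hypothetical BUILD_ARCH value
bindir_native = '/usr/bin'       # bindir_native is an absolute path

# An absolute component resets the join, silently discarding the prefix:
print(os.path.join(staging, arch, bindir_native))
# -> /usr/bin

# Stripping the leading separator keeps the full path:
print(os.path.join(staging, arch, bindir_native.lstrip(os.path.sep)))
# -> /build/tmp/sysroots/x86_64-linux/usr/bin
```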
 
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/sdk.py b/import-layers/yocto-poky/scripts/lib/devtool/sdk.py
index 922277b..e8bf0ad 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/sdk.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/sdk.py
@@ -132,9 +132,9 @@
     # Grab variable values
     tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
     try:
-        stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR', True)
-        sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS', True)
-        site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION', True)
+        stamps_dir = tinfoil.config_data.getVar('STAMPS_DIR')
+        sstate_mirrors = tinfoil.config_data.getVar('SSTATE_MIRRORS')
+        site_conf_version = tinfoil.config_data.getVar('SITE_CONF_VERSION')
     finally:
         tinfoil.shutdown()
 
@@ -273,7 +273,7 @@
             rd = parse_recipe(config, tinfoil, recipe, True)
             if not rd:
                 return 1
-            stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP', True), tasks[0])
+            stampprefixes[recipe] = '%s.%s' % (rd.getVar('STAMP'), tasks[0])
             if checkstamp(recipe):
                 logger.info('%s is already installed' % recipe)
             else:
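checkstamp() itself is not shown in this hunk; a plausible sketch of what a stamp-based "already installed" test looks like, assuming bitbake's convention of writing stamp files named <STAMP>.<task>.<signature> (an assumption, not confirmed by this patch):

```python
import glob

def checkstamp_sketch(stampprefix):
    # Hypothetical helper: a task is treated as done if any stamp file
    # matching '<STAMP>.<task>.*' exists (bitbake appends a signature hash).
    return bool(glob.glob(stampprefix + '.*'))

# e.g. stampprefix built as '%s.%s' % (rd.getVar('STAMP'), 'do_populate_sysroot')
print(checkstamp_sketch('/build/tmp/stamps/x86_64/gcc/6.3.0-r0.do_populate_sysroot'))
```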
@@ -306,6 +306,12 @@
         if failed:
             return 2
 
+        try:
+            exec_build_env_command(config.init_path, basepath, 'bitbake build-sysroots', watch=True)
+        except bb.process.ExecutionError as e:
+            raise DevtoolError('Failed to run bitbake build-sysroots:\n%s' % str(e))
+
+
 def register_commands(subparsers, context):
     """Register devtool subcommands from the sdk plugin"""
     if context.fixed_setup:
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/search.py b/import-layers/yocto-poky/scripts/lib/devtool/search.py
index b44bed7..054985b 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/search.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/search.py
@@ -31,7 +31,7 @@
 
     tinfoil = setup_tinfoil(config_only=False, basepath=basepath)
     try:
-        pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+        pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
         defsummary = tinfoil.config_data.getVar('SUMMARY', False) or ''
 
         keyword_rc = re.compile(args.keyword)
@@ -70,7 +70,7 @@
 
             if match:
                 rd = parse_recipe(config, tinfoil, fn, True)
-                summary = rd.getVar('SUMMARY', True)
+                summary = rd.getVar('SUMMARY')
                 if summary == rd.expand(defsummary):
                     summary = ''
                 print("%s  %s" % (fn.ljust(20), summary))
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/standard.py b/import-layers/yocto-poky/scripts/lib/devtool/standard.py
index 4eff6f8..5ff1e23 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/standard.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/standard.py
@@ -150,25 +150,32 @@
         extracmdopts += ' --src-subdir "%s"' % args.src_subdir
     if args.autorev:
         extracmdopts += ' -a'
+    if args.fetch_dev:
+        extracmdopts += ' --fetch-dev'
 
     tempdir = tempfile.mkdtemp(prefix='devtool')
     try:
+        builtnpm = False
         while True:
             try:
-                stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create -o %s "%s" %s' % (color, tempdir, source, extracmdopts))
+                stdout, _ = exec_build_env_command(config.init_path, basepath, 'recipetool --color=%s create --devtool -o %s \'%s\' %s' % (color, tempdir, source, extracmdopts), watch=True)
             except bb.process.ExecutionError as e:
                 if e.exitcode == 14:
+                    if builtnpm:
+                        raise DevtoolError('Re-running recipetool still failed to find npm')
                     # FIXME this is a horrible hack that is unfortunately
                     # necessary due to the fact that we can't run bitbake from
                     # inside recipetool since recipetool keeps tinfoil active
                     # with references to it throughout the code, so we have
                     # to exit out and come back here to do it.
-                    ensure_npm(config, basepath, args.fixed_setup)
+                    ensure_npm(config, basepath, args.fixed_setup, check_exists=False)
+                    logger.info('Re-running recipe creation process after building nodejs')
+                    builtnpm = True
                     continue
                 elif e.exitcode == 15:
                     raise DevtoolError('Could not auto-determine recipe name, please specify it on the command line')
                 else:
-                    raise DevtoolError('Command \'%s\' failed:\n%s' % (e.command, e.stdout))
+                    raise DevtoolError('Command \'%s\' failed' % e.command)
             break
 
         recipes = glob.glob(os.path.join(tempdir, '*.bb'))
@@ -223,8 +230,17 @@
 
     tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
     try:
-        rd = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, None)
+        try:
+            rd = tinfoil.parse_recipe_file(recipefile, False)
+        except Exception as e:
+            logger.error(str(e))
+            rd = None
         if not rd:
+            # Parsing failed. We just created this recipe and we shouldn't
+            # leave it in the workdir or it'll prevent bitbake from starting
+            movefn = '%s.parsefailed' % recipefile
+            logger.error('Parsing newly created recipe failed, moving recipe to %s for reference. If this looks to be caused by the recipe itself, please report this error.' % movefn)
+            shutil.move(recipefile, movefn)
             return 1
 
         if args.fetchuri and not args.no_git:
@@ -302,7 +318,7 @@
         raise DevtoolError("The %s recipe is a meta-recipe, and therefore is "
                            "not supported by this tool" % pn, 4)
 
-    if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC', True):
+    if bb.data.inherits_class('externalsrc', d) and d.getVar('EXTERNALSRC'):
         # Not an incompatibility error per se, so we don't pass the error code
         raise DevtoolError("externalsrc is currently enabled for the %s "
                            "recipe. This prevents the normal do_patch task "
@@ -331,10 +347,11 @@
         cmd.append('-r')
     out, _ = bb.process.run(cmd, cwd=repodir)
     ret = {}
-    for line in out.split('\0'):
-        if line:
-            split = line.split(None, 4)
-            ret[split[3]] = split[0:3]
+    if out:
+        for line in out.split('\0'):
+            if line:
+                split = line.split(None, 4)
+                ret[split[3]] = split[0:3]
     return ret
 
 def _git_exclude_path(srctree, path):
@@ -376,7 +393,7 @@
             return 1
 
         srctree = os.path.abspath(args.srctree)
-        initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd)
+        initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd, tinfoil)
         logger.info('Source tree extracted to %s' % srctree)
 
         if initial_rev:
@@ -400,7 +417,7 @@
             return 1
 
         srctree = os.path.abspath(args.srctree)
-        initial_rev = _extract_source(srctree, args.keep_temp, args.branch, True, rd)
+        initial_rev = _extract_source(srctree, args.keep_temp, args.branch, True, rd, tinfoil)
         logger.info('Source tree %s synchronized' % srctree)
 
         if initial_rev:
@@ -410,70 +427,6 @@
     finally:
         tinfoil.shutdown()
 
-class BbTaskExecutor(object):
-    """Class for executing bitbake tasks for a recipe
-
-    FIXME: This is very awkward. Unfortunately it's not currently easy to
-    properly execute tasks outside of bitbake itself, until then this has to
-    suffice if we are to handle e.g. linux-yocto's extra tasks
-    """
-
-    def __init__(self, rdata):
-        self.rdata = rdata
-        self.executed = []
-
-    def exec_func(self, func, report):
-        """Run bitbake task function"""
-        if not func in self.executed:
-            deps = self.rdata.getVarFlag(func, 'deps', False)
-            if deps:
-                for taskdepfunc in deps:
-                    self.exec_func(taskdepfunc, True)
-            if report:
-                logger.info('Executing %s...' % func)
-            fn = self.rdata.getVar('FILE', True)
-            localdata = bb.build._task_data(fn, func, self.rdata)
-            try:
-                bb.build.exec_func(func, localdata)
-            except bb.build.FuncFailed as e:
-                raise DevtoolError(str(e))
-            self.executed.append(func)
-
-
-class PatchTaskExecutor(BbTaskExecutor):
-    def __init__(self, rdata):
-        import oe.patch
-        self.check_git = False
-        self.useroptions = []
-        oe.patch.GitApplyTree.gitCommandUserOptions(self.useroptions, d=rdata)
-        super(PatchTaskExecutor, self).__init__(rdata)
-
-    def exec_func(self, func, report):
-        from oe.patch import GitApplyTree
-        srcsubdir = self.rdata.getVar('S', True)
-        haspatches = False
-        if func == 'do_patch':
-            patchdir = os.path.join(srcsubdir, 'patches')
-            if os.path.exists(patchdir):
-                if os.listdir(patchdir):
-                    haspatches = True
-                else:
-                    os.rmdir(patchdir)
-
-        super(PatchTaskExecutor, self).exec_func(func, report)
-        if self.check_git and os.path.exists(srcsubdir):
-            if func == 'do_patch':
-                if os.path.exists(patchdir):
-                    shutil.rmtree(patchdir)
-                    if haspatches:
-                        stdout, _ = bb.process.run('git status --porcelain patches', cwd=srcsubdir)
-                        if stdout:
-                            bb.process.run('git checkout patches', cwd=srcsubdir)
-
-            stdout, _ = bb.process.run('git status --porcelain', cwd=srcsubdir)
-            if stdout:
-                bb.process.run('git add .; git %s commit -a -m "Committing changes from %s\n\n%s"' % (' '.join(self.useroptions), func, GitApplyTree.ignore_commit_prefix + ' - from %s' % func), cwd=srcsubdir)
-
 
 def _prep_extract_operation(config, basepath, recipename, tinfoil=None):
     """HACK: Ugly workaround for making sure that requirements are met when
@@ -497,22 +450,11 @@
     return tinfoil
 
 
-def _extract_source(srctree, keep_temp, devbranch, sync, d):
+def _extract_source(srctree, keep_temp, devbranch, sync, d, tinfoil):
     """Extract sources of a recipe"""
-    import bb.event
     import oe.recipeutils
 
-    def eventfilter(name, handler, event, d):
-        """Bitbake event filter for devtool extract operation"""
-        if name == 'base_eventhandler':
-            return True
-        else:
-            return False
-
-    if hasattr(bb.event, 'set_eventfilter'):
-        bb.event.set_eventfilter(eventfilter)
-
-    pn = d.getVar('PN', True)
+    pn = d.getVar('PN')
 
     _check_compatible_recipe(pn, d)
 
@@ -537,45 +479,92 @@
         bb.utils.mkdirhier(srctree)
         os.rmdir(srctree)
 
-    # We don't want notes to be printed, they are too verbose
-    origlevel = bb.logger.getEffectiveLevel()
-    if logger.getEffectiveLevel() > logging.DEBUG:
-        bb.logger.setLevel(logging.WARNING)
-
     initial_rev = None
-    tempdir = tempfile.mkdtemp(prefix='devtool')
+    # We need to redirect WORKDIR, STAMPS_DIR etc. under a temporary
+    # directory so that:
+    # (a) we pick up all files that get unpacked to the WORKDIR, and
+    # (b) we don't disturb the existing build
+    # However, with recipe-specific sysroots the sysroots for the recipe
+    # will be prepared under WORKDIR, and if we used the system temporary
+    # directory (i.e. usually /tmp) as used by mkdtemp by default, then
+    # our attempts to hardlink files into the recipe-specific sysroots
+    # will fail on systems where /tmp is a different filesystem, and it
+    # would have to fall back to copying the files which is a waste of
+    # time. Put the temp directory under the WORKDIR to prevent that from
+    # being a problem.
+    tempbasedir = d.getVar('WORKDIR')
+    bb.utils.mkdirhier(tempbasedir)
+    tempdir = tempfile.mkdtemp(prefix='devtooltmp-', dir=tempbasedir)
     try:
+        tinfoil.logger.setLevel(logging.WARNING)
+
         crd = d.createCopy()
         # Make a subdir so we guard against WORKDIR==S
         workdir = os.path.join(tempdir, 'workdir')
         crd.setVar('WORKDIR', workdir)
-        crd.setVar('T', os.path.join(tempdir, 'temp'))
-        if not crd.getVar('S', True).startswith(workdir):
+        if not crd.getVar('S').startswith(workdir):
             # Usually a shared workdir recipe (kernel, gcc)
             # Try to set a reasonable default
             if bb.data.inherits_class('kernel', d):
                 crd.setVar('S', '${WORKDIR}/source')
             else:
-                crd.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S', True)))
+                crd.setVar('S', '${WORKDIR}/%s' % os.path.basename(d.getVar('S')))
         if bb.data.inherits_class('kernel', d):
             # We don't want to move the source to STAGING_KERNEL_DIR here
             crd.setVar('STAGING_KERNEL_DIR', '${S}')
 
-        task_executor = PatchTaskExecutor(crd)
+        is_kernel_yocto = bb.data.inherits_class('kernel-yocto', d)
+        if not is_kernel_yocto:
+            crd.setVar('PATCHTOOL', 'git')
+            crd.setVar('PATCH_COMMIT_FUNCTIONS', '1')
 
-        crd.setVar('EXTERNALSRC_forcevariable', '')
+        # Apply our changes to the datastore to the server's datastore
+        for key in crd.localkeys():
+            tinfoil.config_data.setVar('%s_pn-%s' % (key, pn), crd.getVar(key, False))
 
-        logger.info('Fetching %s...' % pn)
-        task_executor.exec_func('do_fetch', False)
-        logger.info('Unpacking...')
-        task_executor.exec_func('do_unpack', False)
+        tinfoil.config_data.setVar('STAMPS_DIR', os.path.join(tempdir, 'stamps'))
+        tinfoil.config_data.setVar('T', os.path.join(tempdir, 'temp'))
+        tinfoil.config_data.setVar('BUILDCFG_FUNCS', '')
+        tinfoil.config_data.setVar('BUILDCFG_HEADER', '')
+        tinfoil.config_data.setVar('BB_HASH_IGNORE_MISMATCH', '1')
+
+        tinfoil.set_event_mask(['bb.event.BuildStarted',
+                                'bb.event.BuildCompleted',
+                                'logging.LogRecord',
+                                'bb.command.CommandCompleted',
+                                'bb.command.CommandFailed',
+                                'bb.build.TaskStarted',
+                                'bb.build.TaskSucceeded',
+                                'bb.build.TaskFailed',
+                                'bb.build.TaskFailedSilent'])
+
+        def runtask(target, task):
+            if tinfoil.build_file(target, task):
+                while True:
+                    event = tinfoil.wait_event(0.25)
+                    if event:
+                        if isinstance(event, bb.command.CommandCompleted):
+                            break
+                        elif isinstance(event, bb.command.CommandFailed):
+                            raise DevtoolError('Task do_%s failed: %s' % (task, event.error))
+                        elif isinstance(event, bb.build.TaskFailed):
+                            raise DevtoolError('Task do_%s failed' % task)
+                        elif isinstance(event, bb.build.TaskStarted):
+                            logger.info('Executing %s...' % event._task)
+                        elif isinstance(event, logging.LogRecord):
+                            if event.levelno <= logging.INFO:
+                                continue
+                            logger.handle(event)
+
+        # We need virtual:native:/path/to/recipe if it's a BBCLASSEXTEND
+        fn = tinfoil.get_recipe_file(pn)
+        runtask(fn, 'unpack')
+
         if bb.data.inherits_class('kernel-yocto', d):
             # Extra step for kernel to populate the source directory
-            logger.info('Doing kernel checkout...')
-            task_executor.exec_func('do_kernel_checkout', False)
-        srcsubdir = crd.getVar('S', True)
+            runtask(fn, 'kernel_checkout')
 
-        task_executor.check_git = True
+        srcsubdir = crd.getVar('S')
 
         # Move local source files into separate subdir
         recipe_patches = [os.path.basename(patch) for patch in
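The long comment in the hunk above explains why the scratch directory moves under WORKDIR: hardlinking into recipe-specific sysroots only works when source and destination share a filesystem, and /tmp frequently does not. The mechanism itself is just mkdtemp's dir= argument; a minimal sketch with a hypothetical WORKDIR:

```python
import os
import tempfile

workdir = os.path.expanduser('~/devtool-demo-workdir')  # hypothetical WORKDIR
os.makedirs(workdir, exist_ok=True)

# Creating the scratch area under WORKDIR keeps it on the same filesystem,
# so later hardlink attempts do not fall back to copying.
tempdir = tempfile.mkdtemp(prefix='devtooltmp-', dir=workdir)
print(tempdir)
```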
@@ -605,7 +594,7 @@
                          os.path.basename(fname) not in recipe_patches]
             # Force separate S so that patch files can be left out from srctree
             srcsubdir = tempfile.mkdtemp(dir=workdir)
-            crd.setVar('S', srcsubdir)
+            tinfoil.config_data.setVar('S_task-patch', srcsubdir)
             # Move source files to S
             for path in src_files:
                 _move_file(os.path.join(workdir, path),
@@ -623,15 +612,13 @@
                            "doesn't use any source or the correct source "
                            "directory could not be determined" % pn)
 
-        setup_git_repo(srcsubdir, crd.getVar('PV', True), devbranch, d=d)
+        setup_git_repo(srcsubdir, crd.getVar('PV'), devbranch, d=d)
 
         (stdout, _) = bb.process.run('git rev-parse HEAD', cwd=srcsubdir)
         initial_rev = stdout.rstrip()
 
-        crd.setVar('PATCHTOOL', 'git')
-
         logger.info('Patching...')
-        task_executor.exec_func('do_patch', False)
+        runtask(fn, 'patch')
 
         bb.process.run('git tag -f devtool-patched', cwd=srcsubdir)
 
@@ -639,8 +626,8 @@
         if bb.data.inherits_class('kernel-yocto', d):
             # Generate and store the kernel config
             logger.info('Generating kernel config')
-            task_executor.exec_func('do_configure', False)
-            kconfig = os.path.join(crd.getVar('B', True), '.config')
+            runtask(fn, 'configure')
+            kconfig = os.path.join(crd.getVar('B'), '.config')
 
 
         tempdir_localdir = os.path.join(tempdir, 'oe-local-files')
@@ -672,13 +659,34 @@
 
             shutil.move(srcsubdir, srctree)
 
+            if os.path.abspath(d.getVar('S')) == os.path.abspath(d.getVar('WORKDIR')):
+                # If recipe extracts to ${WORKDIR}, symlink the files into the srctree
+                # (otherwise the recipe won't build as expected)
+                local_files_dir = os.path.join(srctree, 'oe-local-files')
+                addfiles = []
+                for root, _, files in os.walk(local_files_dir):
+                    relpth = os.path.relpath(root, local_files_dir)
+                    if relpth != '.':
+                        bb.utils.mkdirhier(os.path.join(srctree, relpth))
+                    for fn in files:
+                        if fn == '.gitignore':
+                            continue
+                        destpth = os.path.join(srctree, relpth, fn)
+                        if os.path.exists(destpth):
+                            os.unlink(destpth)
+                        os.symlink('oe-local-files/%s' % fn, destpth)
+                        addfiles.append(os.path.join(relpth, fn))
+                if addfiles:
+                    bb.process.run('git add %s' % ' '.join(addfiles), cwd=srctree)
+                useroptions = []
+                oe.patch.GitApplyTree.gitCommandUserOptions(useroptions, d=d)
+                bb.process.run('git %s commit -a -m "Committing local file symlinks\n\n%s"' % (' '.join(useroptions), oe.patch.GitApplyTree.ignore_commit_prefix), cwd=srctree)
+
         if kconfig:
             logger.info('Copying kernel config to srctree')
             shutil.copy2(kconfig, srctree)
 
     finally:
-        bb.logger.setLevel(origlevel)
-
         if keep_temp:
             logger.info('Preserving temporary directory %s' % tempdir)
         else:
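The oe-local-files symlinking in the hunk above relies on relative symlink targets ('oe-local-files/<name>') so the links stay valid if the whole source tree is later moved. A runnable sketch of the layout, assuming a POSIX filesystem and hypothetical file names:

```python
import os
import tempfile

srctree = tempfile.mkdtemp(prefix='srctree-')  # stands in for the extracted source tree
os.makedirs(os.path.join(srctree, 'oe-local-files'))
with open(os.path.join(srctree, 'oe-local-files', 'defconfig'), 'w') as f:
    f.write('CONFIG_DEMO=y\n')

# Relative target: the link keeps working even if srctree itself is moved.
os.symlink('oe-local-files/defconfig', os.path.join(srctree, 'defconfig'))
print(os.readlink(os.path.join(srctree, 'defconfig')))  # -> oe-local-files/defconfig
```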
@@ -748,7 +756,7 @@
         if not rd:
             return 1
 
-        pn = rd.getVar('PN', True)
+        pn = rd.getVar('PN')
         if pn != args.recipename:
             logger.info('Mapping %s to %s' % (args.recipename, pn))
         if pn in workspace:
@@ -769,8 +777,10 @@
             if not tinfoil:
                 # Error already shown
                 return 1
+            # We need to re-parse because tinfoil may have been re-initialised
+            rd = parse_recipe(config, tinfoil, args.recipename, True)
 
-        recipefile = rd.getVar('FILE', True)
+        recipefile = rd.getVar('FILE')
         appendfile = recipe_to_append(recipefile, config, args.wildcard)
         if os.path.exists(appendfile):
             raise DevtoolError("Another variant of recipe %s is already in your "
@@ -783,7 +793,7 @@
         initial_rev = None
         commits = []
         if not args.no_extract:
-            initial_rev = _extract_source(srctree, False, args.branch, False, rd)
+            initial_rev = _extract_source(srctree, args.keep_temp, args.branch, False, rd, tinfoil)
             if not initial_rev:
                 return 1
             logger.info('Source tree extracted to %s' % srctree)
@@ -807,8 +817,8 @@
                     initial_rev = stdout.rstrip()
 
         # Check that recipe isn't using a shared workdir
-        s = os.path.abspath(rd.getVar('S', True))
-        workdir = os.path.abspath(rd.getVar('WORKDIR', True))
+        s = os.path.abspath(rd.getVar('S'))
+        workdir = os.path.abspath(rd.getVar('WORKDIR'))
         if s.startswith(workdir) and s != workdir and os.path.dirname(s) != workdir:
             # Handle if S is set to a subdirectory of the source
             srcsubdir = os.path.relpath(s, workdir).split(os.sep, 1)[1]
@@ -851,6 +861,199 @@
 
     return 0
 
+
+def rename(args, config, basepath, workspace):
+    """Entry point for the devtool 'rename' subcommand"""
+    import bb
+    import oe.recipeutils
+
+    check_workspace_recipe(workspace, args.recipename)
+
+    if not (args.newname or args.version):
+        raise DevtoolError('You must specify a new name, a version with -V/--version, or both')
+
+    recipefile = workspace[args.recipename]['recipefile']
+    if not recipefile:
+        raise DevtoolError('devtool rename can only be used where the recipe file itself is in the workspace (e.g. after devtool add)')
+
+    if args.newname and args.newname != args.recipename:
+        reason = oe.recipeutils.validate_pn(args.newname)
+        if reason:
+            raise DevtoolError(reason)
+        newname = args.newname
+    else:
+        newname = args.recipename
+
+    append = workspace[args.recipename]['bbappend']
+    appendfn = os.path.splitext(os.path.basename(append))[0]
+    splitfn = appendfn.split('_')
+    if len(splitfn) > 1:
+        origfnver = appendfn.split('_')[1]
+    else:
+        origfnver = ''
+
+    recipefilemd5 = None
+    tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
+    try:
+        rd = parse_recipe(config, tinfoil, args.recipename, True)
+        if not rd:
+            return 1
+
+        bp = rd.getVar('BP')
+        bpn = rd.getVar('BPN')
+        if newname != args.recipename:
+            localdata = rd.createCopy()
+            localdata.setVar('PN', newname)
+            newbpn = localdata.getVar('BPN')
+        else:
+            newbpn = bpn
+        s = rd.getVar('S', False)
+        src_uri = rd.getVar('SRC_URI', False)
+        pv = rd.getVar('PV')
+
+        # Correct variable values that refer to the upstream source - these
+        # values must stay the same, so if the name/version are changing then
+        # we need to fix them up
+        new_s = s
+        new_src_uri = src_uri
+        if newbpn != bpn:
+            # ${PN} here is technically almost always incorrect, but people do use it
+            new_s = new_s.replace('${BPN}', bpn)
+            new_s = new_s.replace('${PN}', bpn)
+            new_s = new_s.replace('${BP}', '%s-${PV}' % bpn)
+            new_src_uri = new_src_uri.replace('${BPN}', bpn)
+            new_src_uri = new_src_uri.replace('${PN}', bpn)
+            new_src_uri = new_src_uri.replace('${BP}', '%s-${PV}' % bpn)
+        if args.version and origfnver == pv:
+            new_s = new_s.replace('${PV}', pv)
+            new_s = new_s.replace('${BP}', '${BPN}-%s' % pv)
+            new_src_uri = new_src_uri.replace('${PV}', pv)
+            new_src_uri = new_src_uri.replace('${BP}', '${BPN}-%s' % pv)
+        patchfields = {}
+        if new_s != s:
+            patchfields['S'] = new_s
+        if new_src_uri != src_uri:
+            patchfields['SRC_URI'] = new_src_uri
+        if patchfields:
+            recipefilemd5 = bb.utils.md5_file(recipefile)
+            oe.recipeutils.patch_recipe(rd, recipefile, patchfields)
+            newrecipefilemd5 = bb.utils.md5_file(recipefile)
+    finally:
+        tinfoil.shutdown()
+
+    if args.version:
+        newver = args.version
+    else:
+        newver = origfnver
+
+    if newver:
+        newappend = '%s_%s.bbappend' % (newname, newver)
+        newfile = '%s_%s.bb' % (newname, newver)
+    else:
+        newappend = '%s.bbappend' % newname
+        newfile = '%s.bb' % newname
+
+    oldrecipedir = os.path.dirname(recipefile)
+    newrecipedir = os.path.join(config.workspace_path, 'recipes', newname)
+    if oldrecipedir != newrecipedir:
+        bb.utils.mkdirhier(newrecipedir)
+
+    newappend = os.path.join(os.path.dirname(append), newappend)
+    newfile = os.path.join(newrecipedir, newfile)
+
+    # Rename bbappend
+    logger.info('Renaming %s to %s' % (append, newappend))
+    os.rename(append, newappend)
+    # Rename recipe file
+    logger.info('Renaming %s to %s' % (recipefile, newfile))
+    os.rename(recipefile, newfile)
+
+    # Rename source tree if it's the default path
+    appendmd5 = None
+    if not args.no_srctree:
+        srctree = workspace[args.recipename]['srctree']
+        if os.path.abspath(srctree) == os.path.join(config.workspace_path, 'sources', args.recipename):
+            newsrctree = os.path.join(config.workspace_path, 'sources', newname)
+            logger.info('Renaming %s to %s' % (srctree, newsrctree))
+            shutil.move(srctree, newsrctree)
+            # Correct any references (basically EXTERNALSRC*) in the .bbappend
+            appendmd5 = bb.utils.md5_file(newappend)
+            appendlines = []
+            with open(newappend, 'r') as f:
+                for line in f:
+                    appendlines.append(line)
+            with open(newappend, 'w') as f:
+                for line in appendlines:
+                    if srctree in line:
+                        line = line.replace(srctree, newsrctree)
+                    f.write(line)
+            newappendmd5 = bb.utils.md5_file(newappend)
+
+    bpndir = None
+    newbpndir = None
+    if newbpn != bpn:
+        bpndir = os.path.join(oldrecipedir, bpn)
+        if os.path.exists(bpndir):
+            newbpndir = os.path.join(newrecipedir, newbpn)
+            logger.info('Renaming %s to %s' % (bpndir, newbpndir))
+            shutil.move(bpndir, newbpndir)
+
+    bpdir = None
+    newbpdir = None
+    if newver != origfnver or newbpn != bpn:
+        bpdir = os.path.join(oldrecipedir, bp)
+        if os.path.exists(bpdir):
+            newbpdir = os.path.join(newrecipedir, '%s-%s' % (newbpn, newver))
+            logger.info('Renaming %s to %s' % (bpdir, newbpdir))
+            shutil.move(bpdir, newbpdir)
+
+    if oldrecipedir != newrecipedir:
+        # Move any stray files and delete the old recipe directory
+        for entry in os.listdir(oldrecipedir):
+            oldpath = os.path.join(oldrecipedir, entry)
+            newpath = os.path.join(newrecipedir, entry)
+            logger.info('Renaming %s to %s' % (oldpath, newpath))
+            shutil.move(oldpath, newpath)
+        os.rmdir(oldrecipedir)
+
+    # Now take care of entries in .devtool_md5
+    md5entries = []
+    with open(os.path.join(config.workspace_path, '.devtool_md5'), 'r') as f:
+        for line in f:
+            md5entries.append(line)
+
+    if bpndir and newbpndir:
+        relbpndir = os.path.relpath(bpndir, config.workspace_path) + '/'
+    else:
+        relbpndir = None
+    if bpdir and newbpdir:
+        relbpdir = os.path.relpath(bpdir, config.workspace_path) + '/'
+    else:
+        relbpdir = None
+
+    with open(os.path.join(config.workspace_path, '.devtool_md5'), 'w') as f:
+        for entry in md5entries:
+            splitentry = entry.rstrip().split('|')
+            if len(splitentry) > 2:
+                if splitentry[0] == args.recipename:
+                    splitentry[0] = newname
+                    if splitentry[1] == os.path.relpath(append, config.workspace_path):
+                        splitentry[1] = os.path.relpath(newappend, config.workspace_path)
+                        if appendmd5 and splitentry[2] == appendmd5:
+                            splitentry[2] = newappendmd5
+                    elif splitentry[1] == os.path.relpath(recipefile, config.workspace_path):
+                        splitentry[1] = os.path.relpath(newfile, config.workspace_path)
+                        if recipefilemd5 and splitentry[2] == recipefilemd5:
+                            splitentry[2] = newrecipefilemd5
+                    elif relbpndir and splitentry[1].startswith(relbpndir):
+                        splitentry[1] = os.path.relpath(os.path.join(newbpndir, splitentry[1][len(relbpndir):]), config.workspace_path)
+                    elif relbpdir and splitentry[1].startswith(relbpdir):
+                        splitentry[1] = os.path.relpath(os.path.join(newbpdir, splitentry[1][len(relbpdir):]), config.workspace_path)
+                    entry = '|'.join(splitentry) + '\n'
+            f.write(entry)
+    return 0
+
+
 def _get_patchset_revs(srctree, recipe_path, initial_rev=None):
     """Get initial and update rev of a recipe. These are the start point of the
     whole patchset and start point for the patches to be re-generated/updated.
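For orientation, the .devtool_md5 bookkeeping rewritten at the end of rename() uses pipe-separated entries; the 'name|relative-path|md5' layout here is inferred from the split('|') handling above, so treat it as a sketch rather than a format specification:

```python
# Hypothetical .devtool_md5 entry before and after a rename.
entry = 'oldname|appends/oldname_1.0.bbappend|d41d8cd98f00b204e9800998ecf8427e\n'

splitentry = entry.rstrip().split('|')
splitentry[0] = 'newname'                       # recipe was renamed
splitentry[1] = 'appends/newname_1.0.bbappend'  # file path follows the rename
print('|'.join(splitentry))
# -> newname|appends/newname_1.0.bbappend|d41d8cd98f00b204e9800998ecf8427e
```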
@@ -909,6 +1112,15 @@
                 break
     return entries, remaining
 
+def _replace_srcuri_entry(srcuri, filename, newentry):
+    """Replace entry corresponding to specified file with a new entry"""
+    basename = os.path.basename(filename)
+    for i in range(len(srcuri)):
+        if os.path.basename(srcuri[i].split(';')[0]) == basename:
+            srcuri.pop(i)
+            srcuri.insert(i, newentry)
+            break
+
 def _remove_source_files(append, files, destpath):
     """Unlink existing patch files"""
     for path in files:
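A short usage sketch for the _replace_srcuri_entry() helper above: it matches on basename (ignoring URL parameters after ';') and swaps the entry in place, preserving its position in SRC_URI. The function body is repeated here only to make the snippet self-contained:

```python
import os

def _replace_srcuri_entry(srcuri, filename, newentry):
    # Mirrors the helper above: match on basename, ignore ';' URL parameters.
    basename = os.path.basename(filename)
    for i in range(len(srcuri)):
        if os.path.basename(srcuri[i].split(';')[0]) == basename:
            srcuri.pop(i)
            srcuri.insert(i, newentry)
            break

srcuri = ['https://example.com/fix-build.patch;striplevel=1', 'file://defconfig']
_replace_srcuri_entry(srcuri, 'fix-build.patch', 'file://fix-build.patch')
print(srcuri)  # ['file://fix-build.patch', 'file://defconfig']
```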
@@ -933,7 +1145,7 @@
                     raise
 
 
-def _export_patches(srctree, rd, start_rev, destdir):
+def _export_patches(srctree, rd, start_rev, destdir, changed_revs=None):
     """Export patches from srctree to given location.
        Returns three-tuple of dicts:
          1. updated - patches that already exist in SRCURI
@@ -962,18 +1174,44 @@
         # revision. This does assume that people are using unique shortlog
         # values, but they ought to be anyway...
         new_basename = seqpatch_re.match(new_patch).group(2)
-        found = False
+        match_name = None
         for old_patch in existing_patches:
             old_basename = seqpatch_re.match(old_patch).group(2)
-            if new_basename == old_basename:
-                updated[new_patch] = existing_patches.pop(old_patch)
-                found = True
-                # Rename patch files
-                if new_patch != old_patch:
-                    os.rename(os.path.join(destdir, new_patch),
-                              os.path.join(destdir, old_patch))
+            old_basename_splitext = os.path.splitext(old_basename)
+            if old_basename.endswith(('.gz', '.bz2', '.Z')) and old_basename_splitext[0] == new_basename:
+                old_patch_noext = os.path.splitext(old_patch)[0]
+                match_name = old_patch_noext
                 break
-        if not found:
+            elif new_basename == old_basename:
+                match_name = old_patch
+                break
+        if match_name:
+            # Rename patch files
+            if new_patch != match_name:
+                os.rename(os.path.join(destdir, new_patch),
+                          os.path.join(destdir, match_name))
+            # Need to pop it off the list now before checking changed_revs
+            oldpath = existing_patches.pop(old_patch)
+            if changed_revs is not None:
+                # Avoid updating patches that have not actually changed
+                with open(os.path.join(destdir, match_name), 'r') as f:
+                    firstlineitems = f.readline().split()
+                    # Looking for "From <hash>" line
+                    if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
+                        if not firstlineitems[1] in changed_revs:
+                            continue
+            # Recompress if necessary
+            if oldpath.endswith(('.gz', '.Z')):
+                bb.process.run(['gzip', match_name], cwd=destdir)
+                if oldpath.endswith('.gz'):
+                    match_name += '.gz'
+                else:
+                    match_name += '.Z'
+            elif oldpath.endswith('.bz2'):
+                bb.process.run(['bzip2', match_name], cwd=destdir)
+                match_name += '.bz2'
+            updated[match_name] = oldpath
+        else:
             added[new_patch] = None
     return (updated, added, existing_patches)
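The compressed-patch matching in the hunk above hinges on os.path.splitext() peeling exactly one extension at a time, so a patch tracked as 0001-fix.patch.gz in SRC_URI compares equal, after one splitext(), to the 0001-fix.patch that git exports:

```python
import os.path

old_basename = '0001-fix.patch.gz'  # as tracked in SRC_URI (hypothetical name)
new_basename = '0001-fix.patch'     # as exported by git

# splitext() only removes the final extension:
print(os.path.splitext(old_basename))                     # ('0001-fix.patch', '.gz')
print(os.path.splitext(old_basename)[0] == new_basename)  # True -> same patch
```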
 
@@ -991,7 +1229,7 @@
         stdout, stderr = pipe.communicate()
         if pipe.returncode == 1:
             logger.info("Updating config fragment %s" % outfile)
-            with open(outfile, 'w') as fobj:
+            with open(outfile, 'wb') as fobj:
                 fobj.write(stdout)
         elif pipe.returncode == 0:
             logger.info("Would remove config fragment %s" % outfile)
@@ -1072,8 +1310,8 @@
             elif fname != '.gitignore':
                 added[fname] = None
 
-        workdir = rd.getVar('WORKDIR', True)
-        s = rd.getVar('S', True)
+        workdir = rd.getVar('WORKDIR')
+        s = rd.getVar('S')
         if not s.endswith(os.sep):
             s += os.sep
 
@@ -1095,14 +1333,14 @@
 
 def _determine_files_dir(rd):
     """Determine the appropriate files directory for a recipe"""
-    recipedir = rd.getVar('FILE_DIRNAME', True)
-    for entry in rd.getVar('FILESPATH', True).split(':'):
+    recipedir = rd.getVar('FILE_DIRNAME')
+    for entry in rd.getVar('FILESPATH').split(':'):
         relpth = os.path.relpath(entry, recipedir)
         if not os.sep in relpth:
             # One (or zero) levels below only, so we don't put anything in machine-specific directories
             if os.path.isdir(entry):
                 return entry
-    return os.path.join(recipedir, rd.getVar('BPN', True))
+    return os.path.join(recipedir, rd.getVar('BPN'))
 
 
 def _update_recipe_srcrev(srctree, rd, appendlayerdir, wildcard_version, no_remove):
@@ -1110,7 +1348,7 @@
     import bb
     import oe.recipeutils
 
-    recipefile = rd.getVar('FILE', True)
+    recipefile = rd.getVar('FILE')
     logger.info('Updating SRCREV in recipe %s' % os.path.basename(recipefile))
 
     # Get HEAD revision
@@ -1192,7 +1430,7 @@
     import bb
     import oe.recipeutils
 
-    recipefile = rd.getVar('FILE', True)
+    recipefile = rd.getVar('FILE')
     append = workspace[recipename]['bbappend']
     if not os.path.exists(append):
         raise DevtoolError('unable to find workspace bbappend for recipe %s' %
@@ -1203,6 +1441,10 @@
         raise DevtoolError('Unable to find initial revision - please specify '
                            'it with --initial-rev')
 
+    dl_dir = rd.getVar('DL_DIR')
+    if not dl_dir.endswith('/'):
+        dl_dir += '/'
+
     tempdir = tempfile.mkdtemp(prefix='devtool')
     try:
         local_files_dir = tempfile.mkdtemp(dir=tempdir)
@@ -1220,7 +1462,7 @@
         # Get updated patches from source tree
         patches_dir = tempfile.mkdtemp(dir=tempdir)
         upd_p, new_p, del_p = _export_patches(srctree, rd, update_rev,
-                                              patches_dir)
+                                              patches_dir, changed_revs)
         updatefiles = False
         updaterecipe = False
         destpath = None
@@ -1247,6 +1489,7 @@
                 logger.info('No patches or local source files needed updating')
         else:
             # Update existing files
+            files_dir = _determine_files_dir(rd)
             for basepath, path in upd_f.items():
                 logger.info('Updating file %s' % basepath)
                 if os.path.isabs(basepath):
@@ -1258,18 +1501,19 @@
                 updatefiles = True
             for basepath, path in upd_p.items():
                 patchfn = os.path.join(patches_dir, basepath)
-                if changed_revs is not None:
-                    # Avoid updating patches that have not actually changed
-                    with open(patchfn, 'r') as f:
-                        firstlineitems = f.readline().split()
-                        if len(firstlineitems) > 1 and len(firstlineitems[1]) == 40:
-                            if not firstlineitems[1] in changed_revs:
-                                continue
-                logger.info('Updating patch %s' % basepath)
+                if os.path.dirname(path) + '/' == dl_dir:
+                    # This is a downloaded patch file - we now need to
+                    # replace the entry in SRC_URI with our local version
+                    logger.info('Replacing remote patch %s with updated local version' % basepath)
+                    path = os.path.join(files_dir, basepath)
+                    _replace_srcuri_entry(srcuri, basepath, 'file://%s' % basepath)
+                    updaterecipe = True
+                else:
+                    logger.info('Updating patch %s' % basepath)
+                logger.debug('Moving new patch %s to %s' % (patchfn, path))
                 _move_file(patchfn, path)
                 updatefiles = True
             # Add any new files
-            files_dir = _determine_files_dir(rd)
             for basepath, path in new_f.items():
                 logger.info('Adding new file %s' % basepath)
                 _move_file(os.path.join(local_files_dir, basepath),
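The DL_DIR comparison above only works because of the trailing-slash normalisation done earlier in the hunk (dl_dir += '/'); os.path.dirname() never returns a trailing separator, so both sides have to be normalised the same way. A runnable check with hypothetical paths:

```python
import os

dl_dir = '/build/downloads'  # hypothetical DL_DIR value
if not dl_dir.endswith('/'):
    dl_dir += '/'

path = '/build/downloads/fix-build.patch'
# dirname() drops the trailing separator, so '/' is re-added for the comparison:
print(os.path.dirname(path) + '/' == dl_dir)  # True -> treat as a downloaded patch
```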
@@ -1356,7 +1600,7 @@
         updated = _update_recipe(args.recipename, workspace, rd, args.mode, args.append, args.wildcard_version, args.no_remove, args.initial_rev)
 
         if updated:
-            rf = rd.getVar('FILE', True)
+            rf = rd.getVar('FILE')
             if rf.startswith(config.workspace_path):
                 logger.warn('Recipe file %s has been updated but is inside the workspace - you will need to move it (and any associated files next to it) out to the desired layer before using "devtool reset" in order to keep any changes' % rf)
     finally:
@@ -1460,7 +1704,7 @@
 
 def _get_layer(layername, d):
     """Determine the base layer path for the specified layer name/path"""
-    layerdirs = d.getVar('BBLAYERS', True).split()
+    layerdirs = d.getVar('BBLAYERS').split()
     layers = {os.path.basename(p): p for p in layerdirs}
     # Provide some shortcuts
     if layername.lower() in ['oe-core', 'openembedded-core']:
@@ -1478,6 +1722,7 @@
 
     check_workspace_recipe(workspace, args.recipename)
 
+    no_clean = False
     tinfoil = setup_tinfoil(basepath=basepath, tracking=True)
     try:
         rd = parse_recipe(config, tinfoil, args.recipename, True)
@@ -1485,7 +1730,7 @@
             return 1
 
         destlayerdir = _get_layer(args.destination, tinfoil.config_data)
-        origlayerdir = oe.recipeutils.find_layerdir(rd.getVar('FILE', True))
+        origlayerdir = oe.recipeutils.find_layerdir(rd.getVar('FILE'))
 
         if not os.path.isdir(destlayerdir):
             raise DevtoolError('Unable to find layer or directory matching "%s"' % args.destination)
@@ -1515,6 +1760,11 @@
             destpath = oe.recipeutils.get_bbfile_path(rd, destlayerdir, origrelpath)
             if not destpath:
                 raise DevtoolError("Unable to determine destination layer path - check that %s specifies an actual layer and %s/conf/layer.conf specifies BBFILES. You may also need to specify a more complete path." % (args.destination, destlayerdir))
+            # Warn if the layer isn't in bblayers.conf (the code to create a bbappend will do this in other cases)
+            layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
+            if not os.path.abspath(destlayerdir) in layerdirs:
+                bb.warn('Specified destination layer is not currently enabled in bblayers.conf, so the %s recipe will now be unavailable in your current configuration until you add the layer there' % args.recipename)
+
         elif destlayerdir == origlayerdir:
             # Same layer, update the original recipe
             appendlayerdir = None
@@ -1539,8 +1789,9 @@
         if origlayerdir == config.workspace_path and destpath:
             # Recipe file itself is in the workspace - need to move it and any
             # associated files to the specified layer
+            no_clean = True
             logger.info('Moving recipe file to %s' % destpath)
-            recipedir = os.path.dirname(rd.getVar('FILE', True))
+            recipedir = os.path.dirname(rd.getVar('FILE'))
             for root, _, files in os.walk(recipedir):
                 for fn in files:
                     srcpath = os.path.join(root, fn)
@@ -1553,7 +1804,7 @@
         tinfoil.shutdown()
 
     # Everything else has succeeded, we can now reset
-    _reset([args.recipename], no_clean=False, config=config, basepath=basepath, workspace=workspace)
+    _reset([args.recipename], no_clean=no_clean, config=config, basepath=basepath, workspace=workspace)
 
     return 0
 
@@ -1580,6 +1831,7 @@
     group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
     group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
     parser_add.add_argument('--fetch', '-f', help='Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead)', metavar='URI')
+    parser_add.add_argument('--fetch-dev', help='For npm, also fetch devDependencies', action="store_true")
     parser_add.add_argument('--version', '-V', help='Version to use within recipe (PV)')
     parser_add.add_argument('--no-git', '-g', help='If fetching source, do not set up source tree as a git repository', action="store_true")
     parser_add.add_argument('--autorev', '-a', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
@@ -1601,6 +1853,7 @@
     group.add_argument('--same-dir', '-s', help='Build in same directory as source', action="store_true")
     group.add_argument('--no-same-dir', help='Force build in a separate build directory', action="store_true")
     parser_modify.add_argument('--branch', '-b', default="devtool", help='Name for development branch to checkout (when not using -n/--no-extract) (default "%(default)s")')
+    parser_modify.add_argument('--keep-temp', help='Keep temporary directory (for debugging)', action="store_true")
     parser_modify.set_defaults(func=modify)
 
     parser_extract = subparsers.add_parser('extract', help='Extract the source for an existing recipe',
@@ -1622,6 +1875,15 @@
     parser_sync.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
     parser_sync.set_defaults(func=sync)
 
+    parser_rename = subparsers.add_parser('rename', help='Rename a recipe file in the workspace',
+                                       description='Renames the recipe file for a recipe in the workspace, changing the name or version part or both, ensuring that all references within the workspace are updated at the same time. Only works when the recipe file itself is in the workspace, e.g. after devtool add. Particularly useful when devtool add did not automatically determine the correct name.',
+                                       group='working', order=10)
+    parser_rename.add_argument('recipename', help='Current name of recipe to rename')
+    parser_rename.add_argument('newname', nargs='?', help='New name for recipe (optional, not needed if you only want to change the version)')
+    parser_rename.add_argument('--version', '-V', help='Change the version (NOTE: this does not change the version fetched by the recipe, just the version in the recipe file name)')
+    parser_rename.add_argument('--no-srctree', '-s', action='store_true', help='Do not rename the source tree directory (if the default source tree path has been used) - keeping the old name may be desirable if there are internal or other external references to this path')
+    parser_rename.set_defaults(func=rename)
+
     parser_update_recipe = subparsers.add_parser('update-recipe', help='Apply changes from external source tree to recipe',
                                        description='Applies changes from external source tree to a recipe (updating/adding/removing patches as necessary, or by updating SRCREV). Note that these changes need to have been committed to the git repository in order to be recognised.',
                                        group='working', order=-90)
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py b/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py
index a4239f1..05fb9e5 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/upgrade.py
@@ -27,6 +27,10 @@
 import scriptutils
 import errno
 import bb
+
+devtool_path = os.path.dirname(os.path.realpath(__file__)) + '/../../../meta/lib'
+sys.path = sys.path + [devtool_path]
+
 import oe.recipeutils
 from devtool import standard
 from devtool import exec_build_env_command, setup_tinfoil, DevtoolError, parse_recipe, use_external_build
@@ -68,7 +72,7 @@
             shutil.rmtree(os.path.join(root,d))
 
 def _recipe_contains(rd, var):
-    rf = rd.getVar('FILE', True)
+    rf = rd.getVar('FILE')
     varfiles = oe.recipeutils.get_var_files(rf, [var], rd)
     for var, fn in varfiles.items():
         if fn and fn.startswith(os.path.dirname(rf) + os.sep):
@@ -117,7 +121,7 @@
     brf = os.path.basename(os.path.splitext(rc)[0]) # rc basename
 
     srctree = os.path.abspath(srctree)
-    pn = d.getVar('PN',True)
+    pn = d.getVar('PN')
     af = os.path.join(appendpath, '%s.bbappend' % brf)
     with open(af, 'w') as f:
         f.write('FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n\n')
@@ -132,7 +136,7 @@
         if rev:
             f.write('# initial_rev: %s\n' % rev)
         if copied:
-            f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE', True)))
+            f.write('# original_path: %s\n' % os.path.dirname(d.getVar('FILE')))
             f.write('# original_files: %s\n' % ' '.join(copied))
     return af
 
@@ -154,7 +158,7 @@
     raise DevtoolError(e)
 
 def _get_uri(rd):
-    srcuris = rd.getVar('SRC_URI', True).split()
+    srcuris = rd.getVar('SRC_URI').split()
     if not len(srcuris):
         raise DevtoolError('SRC_URI not found on recipe')
     # Get first non-local entry in SRC_URI - usually by convention it's
@@ -185,7 +189,7 @@
 
     crd = rd.createCopy()
 
-    pv = crd.getVar('PV', True)
+    pv = crd.getVar('PV')
     crd.setVar('PV', newpv)
 
     tmpsrctree = None
@@ -270,15 +274,15 @@
 def _create_new_recipe(newpv, md5, sha256, srcrev, srcbranch, workspace, tinfoil, rd):
     """Creates the new recipe under workspace"""
 
-    bpn = rd.getVar('BPN', True)
+    bpn = rd.getVar('BPN')
     path = os.path.join(workspace, 'recipes', bpn)
     bb.utils.mkdirhier(path)
     copied, _ = oe.recipeutils.copy_recipe_files(rd, path)
 
-    oldpv = rd.getVar('PV', True)
+    oldpv = rd.getVar('PV')
     if not newpv:
         newpv = oldpv
-    origpath = rd.getVar('FILE', True)
+    origpath = rd.getVar('FILE')
     fullpath = _rename_recipe_files(origpath, bpn, oldpv, newpv, path)
     logger.debug('Upgraded %s => %s' % (origpath, fullpath))
 
@@ -320,7 +324,7 @@
         newvalues['SRC_URI[md5sum]'] = md5
         newvalues['SRC_URI[sha256sum]'] = sha256
 
-    rd = oe.recipeutils.parse_recipe(tinfoil.cooker, fullpath, None)
+    rd = tinfoil.parse_recipe_file(fullpath, False)
     oe.recipeutils.patch_recipe(rd, fullpath, newvalues)
 
     return fullpath, copied
@@ -341,7 +345,7 @@
         if not rd:
             return 1
 
-        pn = rd.getVar('PN', True)
+        pn = rd.getVar('PN')
         if pn != args.recipename:
             logger.info('Mapping %s to %s' % (args.recipename, pn))
         if pn in workspace:
@@ -353,17 +357,17 @@
             srctree = standard.get_default_srctree(config, pn)
 
         standard._check_compatible_recipe(pn, rd)
-        old_srcrev = rd.getVar('SRCREV', True)
+        old_srcrev = rd.getVar('SRCREV')
         if old_srcrev == 'INVALID':
             old_srcrev = None
         if old_srcrev and not args.srcrev:
             raise DevtoolError("Recipe specifies a SRCREV value; you must specify a new one when upgrading")
-        if rd.getVar('PV', True) == args.version and old_srcrev == args.srcrev:
+        if rd.getVar('PV') == args.version and old_srcrev == args.srcrev:
             raise DevtoolError("Current and upgrade versions are the same version")
 
         rf = None
         try:
-            rev1 = standard._extract_source(srctree, False, 'devtool-orig', False, rd)
+            rev1 = standard._extract_source(srctree, False, 'devtool-orig', False, rd, tinfoil)
             rev2, md5, sha256 = _extract_new_source(args.version, srctree, args.no_patch,
                                                     args.srcrev, args.branch, args.keep_temp,
                                                     tinfoil, rd)
diff --git a/import-layers/yocto-poky/scripts/lib/devtool/utilcmds.py b/import-layers/yocto-poky/scripts/lib/devtool/utilcmds.py
index b761a80..0437e64 100644
--- a/import-layers/yocto-poky/scripts/lib/devtool/utilcmds.py
+++ b/import-layers/yocto-poky/scripts/lib/devtool/utilcmds.py
@@ -39,7 +39,7 @@
             rd = parse_recipe(config, tinfoil, args.recipename, True)
             if not rd:
                 return 1
-            recipefile = rd.getVar('FILE', True)
+            recipefile = rd.getVar('FILE')
         finally:
             tinfoil.shutdown()
     else:
@@ -62,20 +62,20 @@
         rd = parse_recipe(config, tinfoil, args.recipename, appends=True, filter_workspace=False)
         if not rd:
             return 1
-        b = rd.getVar('B', True)
-        s = rd.getVar('S', True)
+        b = rd.getVar('B')
+        s = rd.getVar('S')
         configurescript = os.path.join(s, 'configure')
         confdisabled = 'noexec' in rd.getVarFlags('do_configure') or 'do_configure' not in (rd.getVar('__BBTASKS', False) or [])
-        configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS', True) or '')
-        extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF', True) or '')
-        extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE', True) or '')
-        do_configure = rd.getVar('do_configure', True) or ''
+        configureopts = oe.utils.squashspaces(rd.getVar('CONFIGUREOPTS') or '')
+        extra_oeconf = oe.utils.squashspaces(rd.getVar('EXTRA_OECONF') or '')
+        extra_oecmake = oe.utils.squashspaces(rd.getVar('EXTRA_OECMAKE') or '')
+        do_configure = rd.getVar('do_configure') or ''
         do_configure_noexpand = rd.getVar('do_configure', False) or ''
         packageconfig = rd.getVarFlags('PACKAGECONFIG') or []
         autotools = bb.data.inherits_class('autotools', rd) and ('oe_runconf' in do_configure or 'autotools_do_configure' in do_configure)
         cmake = bb.data.inherits_class('cmake', rd) and ('cmake_do_configure' in do_configure)
-        cmake_do_configure = rd.getVar('cmake_do_configure', True)
-        pn = rd.getVar('PN', True)
+        cmake_do_configure = rd.getVar('cmake_do_configure')
+        pn = rd.getVar('PN')
     finally:
         tinfoil.shutdown()
 
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/append.py b/import-layers/yocto-poky/scripts/lib/recipetool/append.py
index 1e0fc1e..69c8bb7 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/append.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/append.py
@@ -48,7 +48,7 @@
     """Find the recipe installing the specified target path, optionally limited to a select list of packages"""
     import json
 
-    pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+    pkgdata_dir = d.getVar('PKGDATA_DIR')
 
     # The mix between /etc and ${sysconfdir} here may look odd, but it is just
     # being consistent with usage elsewhere
@@ -97,25 +97,12 @@
                             recipes[targetpath].append('!%s' % pn)
     return recipes
 
-def _get_recipe_file(cooker, pn):
-    import oe.recipeutils
-    recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
-    if not recipefile:
-        skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
-        if skipreasons:
-            logger.error('\n'.join(skipreasons))
-        else:
-            logger.error("Unable to find any recipe file matching %s" % pn)
-    return recipefile
-
 def _parse_recipe(pn, tinfoil):
-    import oe.recipeutils
-    recipefile = _get_recipe_file(tinfoil.cooker, pn)
-    if not recipefile:
-        # Error already logged
+    try:
+        rd = tinfoil.parse_recipe(pn)
+    except bb.providers.NoProvider as e:
+        logger.error(str(e))
         return None
-    append_files = tinfoil.cooker.collection.get_file_appends(recipefile)
-    rd = oe.recipeutils.parse_recipe(tinfoil.cooker, recipefile, append_files)
     return rd
 
 def determine_file_source(targetpath, rd):
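
Editor's note: the removed `_get_recipe_file()` helper is replaced by the tinfoil `parse_recipe()` API, which raises `bb.providers.NoProvider` instead of returning None with separately logged reasons. A hedged sketch of the resulting call pattern (runnable only inside a BitBake environment, with a prepared tinfoil instance):

```python
import logging

logger = logging.getLogger('recipetool')

def parse_recipe_safely(tinfoil, pn):
    """Return parsed recipe data for pn, or None if no provider exists.

    Sketch only: relies on the tinfoil.parse_recipe() API used by this
    change and on bb.providers.NoProvider.
    """
    import bb.providers
    try:
        return tinfoil.parse_recipe(pn)
    except bb.providers.NoProvider as e:
        # The exception message already carries the unavailable reasons,
        # so get_unavailable_reasons() is no longer needed here.
        logger.error(str(e))
        return None
```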
@@ -123,8 +110,8 @@
     import oe.recipeutils
 
     # See if it's in do_install for the recipe
-    workdir = rd.getVar('WORKDIR', True)
-    src_uri = rd.getVar('SRC_URI', True)
+    workdir = rd.getVar('WORKDIR')
+    src_uri = rd.getVar('SRC_URI')
     srcfile = ''
     modpatches = []
     elements = check_do_install(rd, targetpath)
@@ -134,7 +121,7 @@
         logger.debug('source path: %s' % srcpath)
         if not srcpath.startswith('/'):
             # Handle non-absolute path
-            srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs', True).split()[-1], srcpath))
+            srcpath = os.path.abspath(os.path.join(rd.getVarFlag('do_install', 'dirs').split()[-1], srcpath))
         if srcpath.startswith(workdir):
             # OK, now we have the source file name, look for it in SRC_URI
             workdirfile = os.path.relpath(srcpath, workdir)
@@ -203,22 +190,22 @@
 
 def get_func_deps(func, d):
     """Find the function dependencies of a shell function"""
-    deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
-    deps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
+    deps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
+    deps |= set((d.getVarFlag(func, "vardeps") or "").split())
     funcdeps = []
     for dep in deps:
-        if d.getVarFlag(dep, 'func', True):
+        if d.getVarFlag(dep, 'func'):
             funcdeps.append(dep)
     return funcdeps
 
 def check_do_install(rd, targetpath):
     """Look at do_install for a command that installs/copies the specified target path"""
-    instpath = os.path.abspath(os.path.join(rd.getVar('D', True), targetpath.lstrip('/')))
-    do_install = rd.getVar('do_install', True)
+    instpath = os.path.abspath(os.path.join(rd.getVar('D'), targetpath.lstrip('/')))
+    do_install = rd.getVar('do_install')
     # Handle where do_install calls other functions (somewhat crudely, but good enough for this purpose)
     deps = get_func_deps('do_install', rd)
     for dep in deps:
-        do_install = do_install.replace(dep, rd.getVar(dep, True))
+        do_install = do_install.replace(dep, rd.getVar(dep))
 
     # Look backwards through do_install as we want to catch where a later line (perhaps
     # from a bbappend) is writing over the top
@@ -335,12 +322,12 @@
 def appendsrc(args, files, rd, extralines=None):
     import oe.recipeutils
 
-    srcdir = rd.getVar('S', True)
-    workdir = rd.getVar('WORKDIR', True)
+    srcdir = rd.getVar('S')
+    workdir = rd.getVar('WORKDIR')
 
     import bb.fetch
     simplified = {}
-    src_uri = rd.getVar('SRC_URI', True).split()
+    src_uri = rd.getVar('SRC_URI').split()
     for uri in src_uri:
         if uri.endswith(';'):
             uri = uri[:-1]
@@ -353,7 +340,7 @@
     for newfile, srcfile in files.items():
         src_destdir = os.path.dirname(srcfile)
         if not args.use_workdir:
-            if rd.getVar('S', True) == rd.getVar('STAGING_KERNEL_DIR', True):
+            if rd.getVar('S') == rd.getVar('STAGING_KERNEL_DIR'):
                 srcdir = os.path.join(workdir, 'git')
                 if not bb.data.inherits_class('kernel-yocto', rd):
                     logger.warn('S == STAGING_KERNEL_DIR and non-kernel-yocto, unable to determine path to srcdir, defaulting to ${WORKDIR}/git')
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create.py b/import-layers/yocto-poky/scripts/lib/recipetool/create.py
index d427d32..4de52fc 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create.py
@@ -26,12 +26,24 @@
 import scriptutils
 from urllib.parse import urlparse, urldefrag, urlsplit
 import hashlib
-
+import bb.fetch2
 logger = logging.getLogger('recipetool')
 
 tinfoil = None
 plugins = None
 
+def log_error_cond(message, debugonly):
+    if debugonly:
+        logger.debug(message)
+    else:
+        logger.error(message)
+
+def log_info_cond(message, debugonly):
+    if debugonly:
+        logger.debug(message)
+    else:
+        logger.info(message)
+
 def plugin_init(pluginlist):
     # Take a reference to the list so we can use it later
     global plugins
@@ -47,6 +59,9 @@
     recipecmakefilemap = {}
     recipebinmap = {}
 
+    def __init__(self):
+        self._devtool = False
+
     @staticmethod
     def load_libmap(d):
         '''Load library->recipe mapping'''
@@ -56,8 +71,8 @@
             return
         # First build up library->package mapping
         shlib_providers = oe.package.read_shlib_providers(d)
-        libdir = d.getVar('libdir', True)
-        base_libdir = d.getVar('base_libdir', True)
+        libdir = d.getVar('libdir')
+        base_libdir = d.getVar('base_libdir')
         libpaths = list(set([base_libdir, libdir]))
         libname_re = re.compile('^lib(.+)\.so.*$')
         pkglibmap = {}
@@ -73,7 +88,7 @@
                         logger.debug('unable to extract library name from %s' % lib)
 
         # Now turn it into a library->recipe mapping
-        pkgdata_dir = d.getVar('PKGDATA_DIR', True)
+        pkgdata_dir = d.getVar('PKGDATA_DIR')
         for libname, pkg in pkglibmap.items():
             try:
                 with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
@@ -97,9 +112,9 @@
         '''Build up development file->recipe mapping'''
         if RecipeHandler.recipeheadermap:
             return
-        pkgdata_dir = d.getVar('PKGDATA_DIR', True)
-        includedir = d.getVar('includedir', True)
-        cmakedir = os.path.join(d.getVar('libdir', True), 'cmake')
+        pkgdata_dir = d.getVar('PKGDATA_DIR')
+        includedir = d.getVar('includedir')
+        cmakedir = os.path.join(d.getVar('libdir'), 'cmake')
         for pkg in glob.glob(os.path.join(pkgdata_dir, 'runtime', '*-dev')):
             with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f:
                 pn = None
@@ -128,9 +143,9 @@
         '''Build up native binary->recipe mapping'''
         if RecipeHandler.recipebinmap:
             return
-        sstate_manifests = d.getVar('SSTATE_MANIFESTS', True)
-        staging_bindir_native = d.getVar('STAGING_BINDIR_NATIVE', True)
-        build_arch = d.getVar('BUILD_ARCH', True)
+        sstate_manifests = d.getVar('SSTATE_MANIFESTS')
+        staging_bindir_native = d.getVar('STAGING_BINDIR_NATIVE')
+        build_arch = d.getVar('BUILD_ARCH')
         fileprefix = 'manifest-%s-' % build_arch
         for fn in glob.glob(os.path.join(sstate_manifests, '%s*-native.populate_sysroot' % fileprefix)):
             with open(fn, 'r') as f:
@@ -222,7 +237,8 @@
         if deps:
             values['DEPENDS'] = ' '.join(deps)
 
-    def genfunction(self, outlines, funcname, content, python=False, forcespace=False):
+    @staticmethod
+    def genfunction(outlines, funcname, content, python=False, forcespace=False):
         if python:
             prefix = 'python '
         else:
@@ -323,7 +339,7 @@
                 pn = res.group(1).strip().replace('_', '-')
                 pv = res.group(2).strip().replace('_', '.')
 
-        if not pn and not pv:
+        if not pn and not pv and parseres.scheme not in ['git', 'gitsm', 'svn', 'hg']:
             srcfile = os.path.basename(parseres.path.rstrip('/'))
             pn, pv = determine_from_filename(srcfile)
 
@@ -335,7 +351,6 @@
     # This is a bit sad, but if you don't have this set there can be some
     # odd interactions with the urldata cache which lead to errors
     localdata.setVar('SRCREV', '${AUTOREV}')
-    bb.data.update_data(localdata)
     try:
         fetcher = bb.fetch2.Fetch([uri], localdata)
         urldata = fetcher.ud
@@ -353,14 +368,31 @@
     '''Convert any http[s]://....git URI into git://...;protocol=http[s]'''
     checkuri = uri.split(';', 1)[0]
     if checkuri.endswith('.git') or '/git/' in checkuri or re.match('https?://github.com/[^/]+/[^/]+/?$', checkuri):
-        res = re.match('(http|https|ssh)://([^;]+(\.git)?)(;.*)?$', uri)
-        if res:
-            # Need to switch the URI around so that the git fetcher is used
-            return 'git://%s;protocol=%s%s' % (res.group(2), res.group(1), res.group(4) or '')
-        elif '@' in checkuri:
-            # Catch e.g. git@git.example.com:repo.git
-            return 'git://%s;protocol=ssh' % checkuri.replace(':', '/', 1)
-    return uri
+        # Append the 'git://' scheme if none is present
+        if not '://' in uri:
+            uri = 'git://' + uri
+        scheme, host, path, user, pswd, parms = bb.fetch2.decodeurl(uri)
+        # Detect URLs formatted with ":" rather than "/" between host and path,
+        # which causes decodeurl to return the wrong host and path
+        if len(host.split(':')) > 1:
+            splitslash = host.split(':')
+            host = splitslash[0]
+            path = '/' + splitslash[1] + path
+        # Algorithm:
+        # if a user is defined, append protocol=ssh, unless a protocol is already defined (honour that one)
+        # if no user and password are defined, check the scheme type and record the scheme as the protocol
+        # finally, if the URL is already well-formed, change nothing and rejoin everything back to normal
+        # Need to repackage the arguments for encodeurl, the format is: (scheme, host, path, user, password, OrderedDict([('key', 'value')]))
+        if user:
+            if not 'protocol' in parms:
+                parms.update({('protocol', 'ssh')})
+        elif (scheme == "http" or scheme == 'https' or scheme == 'ssh') and not ('protocol' in parms):
+            parms.update({('protocol', scheme)})
+        # Always re-encode with the 'git://' scheme
+        fUrl = bb.fetch2.encodeurl(('git', host, path, user, pswd, parms))
+        return fUrl
+    else:
+        return uri
 
 def is_package(url):
     '''Check if a URL points to a package'''
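
Editor's note: the rewritten `reformat_git_uri()` replaces ad-hoc regex matching with a `decodeurl()`/`encodeurl()` round trip. Below is a standalone approximation of the same transformation using only the standard library instead of `bb.fetch2`, for illustration:

```python
import re

def reformat_git_uri_sketch(uri):
    """Approximate the new behaviour: force the git:// scheme and record
    the original scheme (or ssh for user@host URIs) as a protocol= parm.
    Simplified sketch; the real code uses bb.fetch2.decodeurl()/encodeurl().
    """
    check = uri.split(';', 1)[0]
    if not (check.endswith('.git') or '/git/' in check or
            re.match(r'https?://github.com/[^/]+/[^/]+/?$', check)):
        return uri
    if '://' not in uri:
        uri = 'git://' + uri
    scheme, rest = uri.split('://', 1)
    hostpath, parms = rest.split(';')[0], rest.split(';')[1:]
    user = ''
    if '@' in hostpath:
        user, hostpath = hostpath.split('@', 1)
    # scp-style host:path -> host/path
    if ':' in hostpath.split('/')[0]:
        hostpath = hostpath.replace(':', '/', 1)
    if user and not any(p.startswith('protocol=') for p in parms):
        parms.append('protocol=ssh')
    elif scheme in ('http', 'https', 'ssh') and \
            not any(p.startswith('protocol=') for p in parms):
        parms.append('protocol=%s' % scheme)
    result = 'git://' + (user + '@' if user else '') + hostpath
    return ';'.join([result] + parms)

assert reformat_git_uri_sketch('https://github.com/foo/bar') == \
    'git://github.com/foo/bar;protocol=https'
assert reformat_git_uri_sketch('git@example.com:repo.git') == \
    'git://git@example.com/repo.git;protocol=ssh'
```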
@@ -404,12 +436,14 @@
             srcuri = rev_re.sub('', srcuri)
         tempsrc = tempfile.mkdtemp(prefix='recipetool-')
         srctree = tempsrc
+        d = bb.data.createCopy(tinfoil.config_data)
         if fetchuri.startswith('npm://'):
             # Check if npm is available
-            check_npm(tinfoil.config_data)
+            npm_bindir = check_npm(tinfoil, args.devtool)
+            d.prependVar('PATH', '%s:' % npm_bindir)
         logger.info('Fetching %s...' % srcuri)
         try:
-            checksums = scriptutils.fetch_uri(tinfoil.config_data, fetchuri, srctree, srcrev)
+            checksums = scriptutils.fetch_uri(d, fetchuri, srctree, srcrev)
         except bb.fetch2.BBFetchException as e:
             logger.error(str(e).rstrip())
             sys.exit(1)
@@ -448,8 +482,8 @@
 
                 if pkgfile:
                     if pkgfile.endswith(('.deb', '.ipk')):
-                        stdout, _ = bb.process.run('ar x %s control.tar.gz' % pkgfile, cwd=tmpfdir)
-                        stdout, _ = bb.process.run('tar xf control.tar.gz ./control', cwd=tmpfdir)
+                        stdout, _ = bb.process.run('ar x %s' % pkgfile, cwd=tmpfdir)
+                        stdout, _ = bb.process.run('tar xf control.tar.gz', cwd=tmpfdir)
                         values = convert_debian(tmpfdir)
                         extravalues.update(values)
                     elif pkgfile.endswith(('.rpm', '.srpm')):
@@ -554,7 +588,6 @@
         if name_pv and not realpv:
             realpv = name_pv
 
-
     if not srcuri:
         lines_before.append('# No information for SRC_URI yet (only an external source tree was specified)')
     lines_before.append('SRC_URI = "%s"' % srcuri)
@@ -588,6 +621,11 @@
         lines_after.append('INSANE_SKIP_${PN} += "already-stripped"')
         lines_after.append('')
 
+    if args.fetch_dev:
+        extravalues['fetchdev'] = True
+    else:
+        extravalues['fetchdev'] = None
+
     # Find all plugins that want to register handlers
     logger.debug('Loading recipe handlers')
     raw_handlers = []
@@ -604,6 +642,7 @@
     handlers.sort(key=lambda item: (item[1], -item[2]), reverse=True)
     for handler, priority, _ in handlers:
         logger.debug('Handler: %s (priority %d)' % (handler.__class__.__name__, priority))
+        setattr(handler, '_devtool', args.devtool)
     handlers = [item[0] for item in handlers]
 
     # Apply the handlers
@@ -640,7 +679,7 @@
 
     if not outfile:
         if not pn:
-            logger.error('Unable to determine short program name from source tree - please specify name with -N/--name or output file name with -o/--outfile')
+            log_error_cond('Unable to determine short program name from source tree - please specify name with -N/--name or output file name with -o/--outfile', args.devtool)
             # devtool looks for this specific exit code, so don't change it
             sys.exit(15)
         else:
@@ -710,6 +749,15 @@
         if not bbclassextend:
             lines_after.append('BBCLASSEXTEND = "native"')
 
+    postinst = ("postinst", extravalues.pop('postinst', None))
+    postrm = ("postrm", extravalues.pop('postrm', None))
+    preinst = ("preinst", extravalues.pop('preinst', None))
+    prerm = ("prerm", extravalues.pop('prerm', None))
+    funcs = [postinst, postrm, preinst, prerm]
+    for func in funcs:
+        if func[1]:
+            RecipeHandler.genfunction(lines_after, 'pkg_%s_${PN}' % func[0], func[1])
+
     outlines = []
     outlines.extend(lines_before)
     if classes:
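
Editor's note: `genfunction()` becomes a static method so the Debian maintainer scripts collected into `extravalues` can be emitted as `pkg_postinst_${PN}` (and friends) without a handler instance. A small sketch of the kind of output this produces, following the upstream shell-function layout:

```python
def genfunction_sketch(outlines, funcname, content, python=False):
    """Emit a recipe function in the same shape genfunction() uses (sketch)."""
    prefix = 'python ' if python else ''
    outlines.append('%s%s () {' % (prefix, funcname))
    for line in content:
        outlines.append('\t%s' % line)  # recipe function bodies are tab-indented
    outlines.append('}')
    outlines.append('')

lines = []
genfunction_sketch(lines, 'pkg_postinst_${PN}',
                   ['update-rc.d example defaults'])
print('\n'.join(lines))
# pkg_postinst_${PN} () {
# 	update-rc.d example defaults
# }
```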
@@ -736,7 +784,7 @@
         shutil.move(srctree, args.extract_to)
         if tempsrc == srctree:
             tempsrc = None
-        logger.info('Source extracted to %s' % args.extract_to)
+        log_info_cond('Source extracted to %s' % args.extract_to, args.devtool)
 
     if outfile == '-':
         sys.stdout.write('\n'.join(outlines) + '\n')
@@ -749,7 +797,7 @@
                     continue
                 f.write('%s\n' % line)
                 lastline = line
-        logger.info('Recipe %s has been created; further editing may be required to make it fully functional' % outfile)
+        log_info_cond('Recipe %s has been created; further editing may be required to make it fully functional' % outfile, args.devtool)
 
     if tempsrc:
         if args.keep_temp:
@@ -775,10 +823,12 @@
         lines_before.append('# your responsibility to verify that the values are complete and correct.')
         if len(licvalues) > 1:
             lines_before.append('#')
-            lines_before.append('# NOTE: multiple licenses have been detected; if that is correct you should separate')
-            lines_before.append('# these in the LICENSE value using & if the multiple licenses all apply, or | if there')
-            lines_before.append('# is a choice between the multiple licenses. If in doubt, check the accompanying')
-            lines_before.append('# documentation to determine which situation is applicable.')
+            lines_before.append('# NOTE: multiple licenses have been detected; they have been separated with &')
+            lines_before.append('# in the LICENSE value for now since it is a reasonable assumption that all')
+            lines_before.append('# of the licenses apply. If instead there is a choice between the multiple')
+            lines_before.append('# licenses then you should change the value to separate the licenses with |')
+            lines_before.append('# instead of &. If there is any doubt, check the accompanying documentation')
+            lines_before.append('# to determine which situation is applicable.')
         if lic_unknown:
             lines_before.append('#')
             lines_before.append('# The following license files were not able to be identified and are')
@@ -802,7 +852,7 @@
             licenses = [pkg_license]
         else:
             lines_before.append('# NOTE: Original package metadata indicates license is: %s' % pkg_license)
-    lines_before.append('LICENSE = "%s"' % ' '.join(licenses))
+    lines_before.append('LICENSE = "%s"' % ' & '.join(licenses))
     lines_before.append('LIC_FILES_CHKSUM = "%s"' % ' \\\n                    '.join(lic_files_chksum))
     lines_before.append('')
     handled.append(('license', licvalues))
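
Editor's note: with the join switched from a space to ' & ', multiple detected licenses now form a valid conjunctive LICENSE expression, matching the updated NOTE text above. For example:

```python
licenses = ['MIT', 'BSD-3-Clause']
# Old: 'LICENSE = "MIT BSD-3-Clause"' (not a valid license expression)
# New: all detected licenses are assumed to apply jointly
print('LICENSE = "%s"' % ' & '.join(licenses))  # LICENSE = "MIT & BSD-3-Clause"
```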
@@ -813,7 +863,7 @@
     md5sums = {}
     if not static_only:
         # Gather md5sums of license files in common license dir
-        commonlicdir = d.getVar('COMMON_LICENSE_DIR', True)
+        commonlicdir = d.getVar('COMMON_LICENSE_DIR')
         for fn in os.listdir(commonlicdir):
             md5value = bb.utils.md5_file(os.path.join(commonlicdir, fn))
             md5sums[md5value] = fn
@@ -983,7 +1033,7 @@
     return outlicenses
 
 def read_pkgconfig_provides(d):
-    pkgdatadir = d.getVar('PKGDATA_DIR', True)
+    pkgdatadir = d.getVar('PKGDATA_DIR')
     pkgmap = {}
     for fn in glob.glob(os.path.join(pkgdatadir, 'shlibs2', '*.pclist')):
         with open(fn, 'r') as f:
@@ -1044,6 +1094,25 @@
                     varname = value_map.get(key, None)
                     if varname:
                         values[varname] = value
+    postinst = os.path.join(debpath, 'postinst')
+    postrm = os.path.join(debpath, 'postrm')
+    preinst = os.path.join(debpath, 'preinst')
+    prerm = os.path.join(debpath, 'prerm')
+    sfiles = [postinst, postrm, preinst, prerm]
+    for sfile in sfiles:
+        if os.path.isfile(sfile):
+            logger.info("Converting %s file to recipe function..." %
+                    os.path.basename(sfile).upper())
+            content = []
+            with open(sfile) as f:
+                for line in f:
+                    if "#!/" in line:
+                        continue
+                    line = line.rstrip("\n")
+                    if line.strip():
+                        content.append(line)
+                if content:
+                    values[os.path.basename(f.name)] = content
 
     #if depends:
     #    values['DEPENDS'] = ' '.join(depends)
@@ -1073,10 +1142,21 @@
     return values
 
 
-def check_npm(d):
-    if not os.path.exists(os.path.join(d.getVar('STAGING_BINDIR_NATIVE', True), 'npm')):
-        logger.error('npm required to process specified source, but npm is not available - you need to build nodejs-native first')
+def check_npm(tinfoil, debugonly=False):
+    try:
+        rd = tinfoil.parse_recipe('nodejs-native')
+    except bb.providers.NoProvider:
+        # We still conditionally show the message and exit with the special
+        # return code, otherwise we can't show the proper message for eSDK
+        # users
+        log_error_cond('nodejs-native is required for npm but is not available - you will likely need to add a layer that provides nodejs', debugonly)
         sys.exit(14)
+    bindir = rd.getVar('STAGING_BINDIR_NATIVE')
+    npmpath = os.path.join(bindir, 'npm')
+    if not os.path.exists(npmpath):
+        log_error_cond('npm required to process specified source, but npm is not available - you need to run bitbake -c addto_recipe_sysroot nodejs-native first', debugonly)
+        sys.exit(14)
+    return bindir
 
 def register_commands(subparsers):
     parser_create = subparsers.add_parser('create',
@@ -1093,5 +1173,10 @@
     parser_create.add_argument('--src-subdir', help='Specify subdirectory within source tree to use', metavar='SUBDIR')
     parser_create.add_argument('-a', '--autorev', help='When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed', action="store_true")
     parser_create.add_argument('--keep-temp', action="store_true", help='Keep temporary directory (for debugging)')
-    parser_create.set_defaults(func=create_recipe)
+    parser_create.add_argument('--fetch-dev', action="store_true", help='For npm, also fetch devDependencies')
+    parser_create.add_argument('--devtool', action="store_true", help=argparse.SUPPRESS)
+    # FIXME I really hate having to set parserecipes for this, but given that we
+    # may need to call into npm (and we don't know in advance whether we will),
+    # and that doing so requires npm's recipe sysroot path, there's not much alternative
+    parser_create.set_defaults(func=create_recipe, parserecipes=True)
 
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py
index 82a2be1..ec5449b 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_buildsys_python.py
@@ -532,11 +532,11 @@
 
     def parse_pkgdata_for_python_packages(self):
         suffixes = [t[0] for t in imp.get_suffixes()]
-        pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+        pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
 
         ldata = tinfoil.config_data.createCopy()
         bb.parse.handle('classes/python-dir.bbclass', ldata, True)
-        python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR', True)
+        python_sitedir = ldata.getVar('PYTHON_SITEPACKAGES_DIR')
 
         dynload_dir = os.path.join(os.path.dirname(python_sitedir), 'lib-dynload')
         python_dirs = [python_sitedir + os.sep,
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py
index 7dac59f..ca4996c 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_kernel.py
@@ -41,7 +41,7 @@
         handled.append('buildsystem')
         del lines_after[:]
         del classes[:]
-        template = os.path.join(tinfoil.config_data.getVar('COREBASE', True), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
+        template = os.path.join(tinfoil.config_data.getVar('COREBASE'), 'meta-skeleton', 'recipes-kernel', 'linux', 'linux-yocto-custom.bb')
         def handle_var(varname, origvalue, op, newlines):
             if varname in ['SRCREV', 'SRCREV_machine']:
                 while newlines[-1].startswith('#'):
@@ -85,7 +85,7 @@
             elif varname == 'COMPATIBLE_MACHINE':
                 while newlines[-1].startswith('#'):
                     del newlines[-1]
-                machine = tinfoil.config_data.getVar('MACHINE', True)
+                machine = tinfoil.config_data.getVar('MACHINE')
                 return machine, op, 0, True
             return origvalue, op, 0, True
         with open(template, 'r') as f:
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py b/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py
index 7bb844c..cb8f338 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/create_npm.py
@@ -45,11 +45,29 @@
             license = data['license']
             if isinstance(license, dict):
                 license = license.get('type', None)
+            if license:
+                if 'OR' in license:
+                    license = license.replace('OR', '|')
+                    license = license.replace('AND', '&')
+                    license = license.replace(' ', '_')
+                    if not license[0] == '(':
+                        license = '(' + license + ')'
+                    logger.debug('LICENSE: {}'.format(license))
+                else:
+                    license = license.replace('AND', '&')
+                    if license[0] == '(':
+                        license = license[1:]
+                    if license[-1] == ')':
+                        license = license[:-1]
+                license = license.replace('MIT/X11', 'MIT')
+                license = license.replace('Public Domain', 'PD')
+                license = license.replace('SEE LICENSE IN EULA',
+                                          'SEE-LICENSE-IN-EULA')
         return license
 
-    def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before):
+    def _shrinkwrap(self, srctree, localfilesdir, extravalues, lines_before, d):
         try:
-            runenv = dict(os.environ, PATH=tinfoil.config_data.getVar('PATH', True))
+            runenv = dict(os.environ, PATH=d.getVar('PATH'))
             bb.process.run('npm shrinkwrap', cwd=srctree, stderr=subprocess.STDOUT, env=runenv, shell=True)
         except bb.process.ExecutionError as e:
             logger.warn('npm shrinkwrap failed:\n%s' % e.stdout)
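
Editor's note: the license-field mangling above converts npm/SPDX-style expressions into BitBake's LICENSE syntax ('OR' becomes '|', 'AND' becomes '&'). A condensed standalone version for experimenting with the mapping (simplified; the in-tree code also strips parentheses slightly differently):

```python
def npm_license_to_bitbake(license):
    """Sketch of the OR/AND -> |/& mapping used for npm license fields."""
    if not license:
        return license
    if 'OR' in license:
        license = license.replace('OR', '|').replace('AND', '&')
        license = license.replace(' ', '_')  # no spaces inside alternatives
        if not license.startswith('('):
            license = '(' + license + ')'
    else:
        license = license.replace('AND', '&').strip('()')
    # Known aliases
    license = license.replace('MIT/X11', 'MIT')
    license = license.replace('Public Domain', 'PD')
    return license

assert npm_license_to_bitbake('(MIT OR Apache-2.0)') == '(MIT_|_Apache-2.0)'
assert npm_license_to_bitbake('MIT AND ISC') == 'MIT & ISC'
```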
@@ -61,8 +79,8 @@
         extravalues['extrafiles']['npm-shrinkwrap.json'] = tmpfile
         lines_before.append('NPM_SHRINKWRAP := "${THISDIR}/${PN}/npm-shrinkwrap.json"')
 
-    def _lockdown(self, srctree, localfilesdir, extravalues, lines_before):
-        runenv = dict(os.environ, PATH=tinfoil.config_data.getVar('PATH', True))
+    def _lockdown(self, srctree, localfilesdir, extravalues, lines_before, d):
+        runenv = dict(os.environ, PATH=d.getVar('PATH'))
         if not NpmRecipeHandler.lockdownpath:
             NpmRecipeHandler.lockdownpath = tempfile.mkdtemp('recipetool-npm-lockdown')
             bb.process.run('npm install lockdown --prefix %s' % NpmRecipeHandler.lockdownpath,
@@ -83,7 +101,7 @@
         extravalues['extrafiles']['lockdown.json'] = tmpfile
         lines_before.append('NPM_LOCKDOWN := "${THISDIR}/${PN}/lockdown.json"')
 
-    def _handle_dependencies(self, d, deps, lines_before, srctree):
+    def _handle_dependencies(self, d, deps, optdeps, devdeps, lines_before, srctree):
         import scriptutils
         # If this isn't a single module we need to get the dependencies
         # and add them to SRC_URI
@@ -92,8 +110,21 @@
                 if not origvalue.startswith('npm://'):
                     src_uri = origvalue.split()
                     changed = False
-                    for dep, depdata in deps.items():
-                        version = self.get_node_version(dep, depdata, d)
+                    deplist = {}
+                    for dep, depver in optdeps.items():
+                        depdata = self.get_npm_data(dep, depver, d)
+                        if self.check_npm_optional_dependency(depdata):
+                            deplist[dep] = depdata
+                    for dep, depver in devdeps.items():
+                        depdata = self.get_npm_data(dep, depver, d)
+                        if self.check_npm_optional_dependency(depdata):
+                            deplist[dep] = depdata
+                    for dep, depver in deps.items():
+                        depdata = self.get_npm_data(dep, depver, d)
+                        deplist[dep] = depdata
+
+                    for dep, depdata in deplist.items():
+                        version = depdata.get('version', None)
                         if version:
                             url = 'npm://registry.npmjs.org;name=%s;version=%s;subdir=node_modules/%s' % (dep, version, dep)
                             scriptutils.fetch_uri(d, url, srctree)
@@ -157,7 +188,9 @@
 
         files = RecipeHandler.checkfiles(srctree, ['package.json'])
         if files:
-            check_npm(tinfoil.config_data)
+            d = bb.data.createCopy(tinfoil.config_data)
+            npm_bindir = check_npm(tinfoil, self._devtool)
+            d.prependVar('PATH', '%s:' % npm_bindir)
 
             data = read_package_json(files[0])
             if 'name' in data and 'version' in data:
@@ -170,18 +203,19 @@
                 if 'homepage' in data:
                     extravalues['HOMEPAGE'] = data['homepage']
 
-                deps = data.get('dependencies', {})
-                updated = self._handle_dependencies(tinfoil.config_data, deps, lines_before, srctree)
+                fetchdev = extravalues['fetchdev'] or None
+                deps, optdeps, devdeps = self.get_npm_package_dependencies(data, fetchdev)
+                updated = self._handle_dependencies(d, deps, optdeps, devdeps, lines_before, srctree)
                 if updated:
                     # We need to redo the license stuff
-                    self._replace_license_vars(srctree, lines_before, handled, extravalues, tinfoil.config_data)
+                    self._replace_license_vars(srctree, lines_before, handled, extravalues, d)
 
                 # Shrinkwrap
                 localfilesdir = tempfile.mkdtemp(prefix='recipetool-npm')
-                self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before)
+                self._shrinkwrap(srctree, localfilesdir, extravalues, lines_before, d)
 
                 # Lockdown
-                self._lockdown(srctree, localfilesdir, extravalues, lines_before)
+                self._lockdown(srctree, localfilesdir, extravalues, lines_before, d)
 
                # Split each npm module out to its own package
                 npmpackages = oe.package.npm_split_package_dirs(srctree)
@@ -207,7 +241,9 @@
                     packages = OrderedDict((x,y[0]) for x,y in npmpackages.items())
                     packages['${PN}'] = ''
                     pkglicenses = split_pkg_licenses(licvalues, packages, lines_after, licenses)
-                    all_licenses = list(set([item for pkglicense in pkglicenses.values() for item in pkglicense]))
+                    all_licenses = list(set([item.replace('_', ' ') for pkglicense in pkglicenses.values() for item in pkglicense]))
+                    if '&' in all_licenses:
+                        all_licenses.remove('&')
                     # Go back and update the LICENSE value since we have a bit more
                     # information than when that was written out (and we know all apply
                     # vs. there being a choice, so we can join them with &)
@@ -251,17 +287,58 @@
 
     # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
     # (split out from _getdependencies())
-    def get_node_version(self, pkg, version, d):
+    def get_npm_data(self, pkg, version, d):
         import bb.fetch2
         pkgfullname = pkg
         if version != '*' and not '/' in version:
             pkgfullname += "@'%s'" % version
         logger.debug(2, "Calling getdeps on %s" % pkg)
-        runenv = dict(os.environ, PATH=d.getVar('PATH', True))
+        runenv = dict(os.environ, PATH=d.getVar('PATH'))
         fetchcmd = "npm view %s --json" % pkgfullname
         output, _ = bb.process.run(fetchcmd, stderr=subprocess.STDOUT, env=runenv, shell=True)
         data = self._parse_view(output)
-        return data.get('version', None)
+        return data
+
+    # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+    # (split out from _getdependencies())
+    def get_npm_package_dependencies(self, pdata, fetchdev):
+        dependencies = pdata.get('dependencies', {})
+        optionalDependencies = pdata.get('optionalDependencies', {})
+        dependencies.update(optionalDependencies)
+        if fetchdev:
+            devDependencies = pdata.get('devDependencies', {})
+            dependencies.update(devDependencies)
+        else:
+            devDependencies = {}
+        depsfound = {}
+        optdepsfound = {}
+        devdepsfound = {}
+        for dep in dependencies:
+            if dep in optionalDependencies:
+                optdepsfound[dep] = dependencies[dep]
+            elif dep in devDependencies:
+                devdepsfound[dep] = dependencies[dep]
+            else:
+                depsfound[dep] = dependencies[dep]
+        return depsfound, optdepsfound, devdepsfound
+
+    # FIXME this is effectively duplicated from lib/bb/fetch2/npm.py
+    # (split out from _getdependencies())
+    def check_npm_optional_dependency(self, pdata):
+        pkg_os = pdata.get('os', None)
+        if pkg_os:
+            if not isinstance(pkg_os, list):
+                pkg_os = [pkg_os]
+            blacklist = False
+            for item in pkg_os:
+                if item.startswith('!'):
+                    blacklist = True
+                    break
+            if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
+                logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
+                return False
+        return True
+
 
 def register_recipe_handlers(handlers):
     handlers.append((NpmRecipeHandler(), 60))
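
Editor's note: `get_npm_package_dependencies()` splits a package.json dependency map into runtime, optional, and (when `--fetch-dev` is given) development dependencies, so each class can be vetted separately. A quick illustration of the classification on a toy package.json dict:

```python
pdata = {
    'dependencies': {'lodash': '^4.0.0'},
    'optionalDependencies': {'fsevents': '^1.0.0'},
    'devDependencies': {'mocha': '^3.0.0'},
}

def split_deps(pdata, fetchdev):
    """Mirror of the classification logic above, trimmed for clarity."""
    optdeps = pdata.get('optionalDependencies', {})
    devdeps = pdata.get('devDependencies', {}) if fetchdev else {}
    deps = dict(pdata.get('dependencies', {}))
    deps.update(optdeps)
    deps.update(devdeps)
    plain, opt, dev = {}, {}, {}
    for name, ver in deps.items():
        if name in optdeps:
            opt[name] = ver
        elif name in devdeps:
            dev[name] = ver
        else:
            plain[name] = ver
    return plain, opt, dev

print(split_deps(pdata, fetchdev=True))
# ({'lodash': '^4.0.0'}, {'fsevents': '^1.0.0'}, {'mocha': '^3.0.0'})
```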
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py b/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py
index fbdd7bc..0b63759 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/newappend.py
@@ -39,18 +39,6 @@
     tinfoil = instance
 
 
-def _get_recipe_file(cooker, pn):
-    import oe.recipeutils
-    recipefile = oe.recipeutils.pn_to_recipe(cooker, pn)
-    if not recipefile:
-        skipreasons = oe.recipeutils.get_unavailable_reasons(cooker, pn)
-        if skipreasons:
-            logger.error('\n'.join(skipreasons))
-        else:
-            logger.error("Unable to find any recipe file matching %s" % pn)
-    return recipefile
-
-
 def layer(layerpath):
     if not os.path.exists(os.path.join(layerpath, 'conf', 'layer.conf')):
         raise argparse.ArgumentTypeError('{0!r} must be a path to a valid layer'.format(layerpath))
@@ -60,7 +48,7 @@
 def newappend(args):
     import oe.recipeutils
 
-    recipe_path = _get_recipe_file(tinfoil.cooker, args.target)
+    recipe_path = tinfoil.get_recipe_file(args.target)
 
     rd = tinfoil.config_data.createCopy()
     rd.setVar('FILE', recipe_path)
@@ -72,7 +60,7 @@
     if not path_ok:
         logger.warn('Unable to determine correct subdirectory path for bbappend file - check that what %s adds to BBFILES also matches .bbappend files. Using %s for now, but until you fix this the bbappend will not be applied.', os.path.join(args.destlayer, 'conf', 'layer.conf'), os.path.dirname(append_path))
 
-    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS', True).split()]
+    layerdirs = [os.path.abspath(layerdir) for layerdir in rd.getVar('BBLAYERS').split()]
     if not os.path.abspath(args.destlayer) in layerdirs:
         logger.warn('Specified layer is not currently enabled in bblayers.conf, you will need to add it before this bbappend will be active')
 
diff --git a/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py b/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py
index 85701c0..9de315a 100644
--- a/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py
+++ b/import-layers/yocto-poky/scripts/lib/recipetool/setvar.py
@@ -51,7 +51,7 @@
     if args.recipe_only:
         patches = [oe.recipeutils.patch_recipe_file(args.recipefile, varvalues, patch=args.patch)]
     else:
-        rd = oe.recipeutils.parse_recipe(tinfoil.cooker, args.recipefile, None)
+        rd = tinfoil.parse_recipe_file(args.recipefile, False)
         if not rd:
             return 1
         patches = oe.recipeutils.patch_recipe(rd, args.recipefile, varvalues, patch=args.patch)
diff --git a/import-layers/yocto-poky/scripts/lib/scriptutils.py b/import-layers/yocto-poky/scripts/lib/scriptutils.py
index 5ccc027..92b601c 100644
--- a/import-layers/yocto-poky/scripts/lib/scriptutils.py
+++ b/import-layers/yocto-poky/scripts/lib/scriptutils.py
@@ -21,10 +21,12 @@
 import glob
 import argparse
 import subprocess
+import tempfile
+import shutil
 
-def logger_create(name):
+def logger_create(name, stream=None):
     logger = logging.getLogger(name)
-    loggerhandler = logging.StreamHandler()
+    loggerhandler = logging.StreamHandler(stream=stream)
     loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
     logger.addHandler(loggerhandler)
     logger.setLevel(logging.INFO)
@@ -52,10 +54,14 @@
             if fp:
                 fp.close()
 
+    def plugin_name(filename):
+        return os.path.splitext(os.path.basename(filename))[0]
+
+    known_plugins = [plugin_name(p.__name__) for p in plugins]
     logger.debug('Loading plugins from %s...' % pluginpath)
     for fn in glob.glob(os.path.join(pluginpath, '*.py')):
-        name = os.path.splitext(os.path.basename(fn))[0]
-        if name != '__init__':
+        name = plugin_name(fn)
+        if name != '__init__' and name not in known_plugins:
             plugin = load_plugin(name)
             if hasattr(plugin, 'plugin_init'):
                 plugin.plugin_init(plugins)
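
Editor's note: `load_plugins()` now records already-registered plugin names, so calling it twice (as devtool and recipetool can do when they share a plugin path) does not initialise the same plugin again. The core of the idea, extracted as a sketch (`load_plugin` stands for whatever callable imports a module by name and is assumed to return the module):

```python
import glob
import os

def load_plugins_once(pluginpath, plugins, load_plugin):
    """Sketch: skip plugin modules whose basename is already registered."""
    def plugin_name(filename):
        return os.path.splitext(os.path.basename(filename))[0]

    known = {plugin_name(p.__name__) for p in plugins}
    for fn in glob.glob(os.path.join(pluginpath, '*.py')):
        name = plugin_name(fn)
        if name != '__init__' and name not in known:
            plugin = load_plugin(name)
            if hasattr(plugin, 'plugin_init'):
                plugin.plugin_init(plugins)
            plugins.append(plugin)
```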
@@ -74,32 +80,47 @@
 
 def fetch_uri(d, uri, destdir, srcrev=None):
     """Fetch a URI to a local directory"""
-    import bb.data
-    bb.utils.mkdirhier(destdir)
-    localdata = bb.data.createCopy(d)
-    localdata.setVar('BB_STRICT_CHECKSUM', '')
-    localdata.setVar('SRCREV', srcrev)
-    ret = (None, None)
-    olddir = os.getcwd()
+    import bb
+    tmpparent = d.getVar('BASE_WORKDIR')
+    bb.utils.mkdirhier(tmpparent)
+    tmpworkdir = tempfile.mkdtemp(dir=tmpparent)
     try:
-        fetcher = bb.fetch2.Fetch([uri], localdata)
-        for u in fetcher.ud:
-            ud = fetcher.ud[u]
-            ud.ignore_checksums = True
-        fetcher.download()
-        for u in fetcher.ud:
-            ud = fetcher.ud[u]
-            if ud.localpath.rstrip(os.sep) == localdata.getVar('DL_DIR', True).rstrip(os.sep):
-                raise Exception('Local path is download directory - please check that the URI "%s" is correct' % uri)
-        fetcher.unpack(destdir)
-        for u in fetcher.ud:
-            ud = fetcher.ud[u]
-            if ud.method.recommends_checksum(ud):
-                md5value = bb.utils.md5_file(ud.localpath)
-                sha256value = bb.utils.sha256_file(ud.localpath)
-                ret = (md5value, sha256value)
+        bb.utils.mkdirhier(destdir)
+        localdata = bb.data.createCopy(d)
+
+        # Set some values to allow extend_recipe_sysroot to work here, where we are not running from a task
+        localdata.setVar('WORKDIR', tmpworkdir)
+        localdata.setVar('BB_RUNTASK', 'do_fetch')
+        localdata.setVar('PN', 'dummy')
+        localdata.setVar('BB_LIMITEDDEPS', '1')
+        bb.build.exec_func("extend_recipe_sysroot", localdata)
+
+        # Set some values for the benefit of the fetcher code
+        localdata.setVar('BB_STRICT_CHECKSUM', '')
+        localdata.setVar('SRCREV', srcrev)
+        ret = (None, None)
+        olddir = os.getcwd()
+        try:
+            fetcher = bb.fetch2.Fetch([uri], localdata)
+            for u in fetcher.ud:
+                ud = fetcher.ud[u]
+                ud.ignore_checksums = True
+            fetcher.download()
+            for u in fetcher.ud:
+                ud = fetcher.ud[u]
+                if ud.localpath.rstrip(os.sep) == localdata.getVar('DL_DIR').rstrip(os.sep):
+                    raise Exception('Local path is download directory - please check that the URI "%s" is correct' % uri)
+            fetcher.unpack(destdir)
+            for u in fetcher.ud:
+                ud = fetcher.ud[u]
+                if ud.method.recommends_checksum(ud):
+                    md5value = bb.utils.md5_file(ud.localpath)
+                    sha256value = bb.utils.sha256_file(ud.localpath)
+                    ret = (md5value, sha256value)
+        finally:
+            os.chdir(olddir)
     finally:
-        os.chdir(olddir)
+        shutil.rmtree(tmpworkdir)
     return ret
 
 def run_editor(fn):
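
Editor's note: the `fetch_uri()` rewrite wraps the whole fetch in a throwaway WORKDIR so `extend_recipe_sysroot` can run outside of a task, and guarantees cleanup with nested try/finally. The skeleton of that pattern, independent of BitBake:

```python
import os
import shutil
import tempfile

def with_temp_workdir(parent, work):
    """Run work(tmpdir) in a fresh directory under parent, always cleaning up.

    Mirrors the structure of the new fetch_uri(): the inner finally restores
    the caller's cwd, the outer finally removes the temporary tree.
    """
    os.makedirs(parent, exist_ok=True)
    tmpdir = tempfile.mkdtemp(dir=parent)
    try:
        olddir = os.getcwd()
        try:
            return work(tmpdir)
        finally:
            os.chdir(olddir)   # fetchers may chdir; undo that first
    finally:
        shutil.rmtree(tmpdir)  # then drop the scratch WORKDIR entirely

result = with_temp_workdir('/tmp', lambda d: os.path.basename(d).startswith('tmp'))
assert result
```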
diff --git a/import-layers/yocto-poky/scripts/lib/wic/__init__.py b/import-layers/yocto-poky/scripts/lib/wic/__init__.py
index 63c1d9c..85876b1 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/__init__.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/__init__.py
@@ -1,4 +1,20 @@
-import os, sys
+#!/usr/bin/env python -tt
+#
+# Copyright (c) 2007 Red Hat, Inc.
+# Copyright (c) 2011 Intel, Inc.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 
-cur_path = os.path.dirname(__file__) or '.'
-sys.path.insert(0, cur_path + '/3rdparty')
+class WicError(Exception):
+    pass
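
Editor's note: wic's package init is repurposed here; the sys.path manipulation is gone and a single WicError exception replaces the print-and-sys.exit(1) error handling removed from engine.py below. The calling convention this enables, sketched:

```python
import sys

class WicError(Exception):
    pass

def verify_build_env_sketch(environ):
    # Library code raises instead of exiting...
    if not environ.get('BUILDDIR'):
        raise WicError('BUILDDIR not found, exiting. '
                       '(Did you forget to source oe-init-build-env?)')
    return True

# ...and only the top-level entry point decides how to report it.
try:
    verify_build_env_sketch({})
except WicError as err:
    print('Error: %s' % err, file=sys.stderr)
```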
diff --git a/import-layers/yocto-poky/scripts/lib/wic/__version__.py b/import-layers/yocto-poky/scripts/lib/wic/__version__.py
deleted file mode 100644
index 5452a46..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/__version__.py
+++ /dev/null
@@ -1 +0,0 @@
-VERSION = "2.00"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
index a16bd6a..d5a07d2 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/directdisk-bootloader-config.cfg
@@ -1,11 +1,27 @@
 # This is an example configuration file for syslinux.
-PROMPT 0
-TIMEOUT 10
-
+TIMEOUT 50
 ALLOWOPTIONS 1
 SERIAL 0 115200
+PROMPT 0
 
-DEFAULT boot
-LABEL boot
+UI vesamenu.c32
+menu title Select boot options
+menu tabmsg Press [Tab] to edit, [Return] to select
+
+DEFAULT Graphics console boot
+
+LABEL Graphics console boot
 KERNEL /vmlinuz
-APPEND label=boot root=/dev/sda2 rootwait rootfstype=ext4 video=vesafb vga=0x318 console=tty0
+APPEND label=boot root=/dev/sda2 rootwait
+
+LABEL Serial console boot
+KERNEL /vmlinuz
+APPEND label=boot root=/dev/sda2 rootwait console=ttyS0,115200
+
+LABEL Graphics console install
+KERNEL /vmlinuz
+APPEND label=install root=/dev/sda2 rootwait
+
+LABEL Serial console install
+KERNEL /vmlinuz
+APPEND label=install root=/dev/sda2 rootwait console=ttyS0,115200
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks
deleted file mode 100644
index f3ae090..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/mkgummidisk.wks
+++ /dev/null
@@ -1,11 +0,0 @@
-# short-description: Create an EFI disk image
-# long-description: Creates a partitioned EFI disk image that the user
-# can directly dd to boot media.
-
-part /boot --source bootimg-efi --sourceparams="loader=gummiboot" --ondisk sda --label msdos --active --align 1024
-
-part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
-
-part swap --ondisk sda --size 44 --label swap1 --fstype=swap
-
-bootloader --ptable gpt --timeout=5  --append="rootwait rootfstype=ext4 console=ttyS0,115200 console=tty0"
diff --git a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
index a6518a0..db30bbc 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
+++ b/import-layers/yocto-poky/scripts/lib/wic/canned-wks/qemux86-directdisk.wks
@@ -4,5 +4,5 @@
 
 include common.wks.inc
 
-bootloader  --timeout=0  --append="vga=0 uvesafb.mode_option=640x480-32 root=/dev/vda2 rw mem=256M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 rootfstype=ext4 "
+bootloader  --timeout=0  --append="vga=0 uvesafb.mode_option=640x480-32 root=/dev/sda2 rw mem=256M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 rootfstype=ext4 "
 
diff --git a/import-layers/yocto-poky/scripts/lib/wic/conf.py b/import-layers/yocto-poky/scripts/lib/wic/conf.py
deleted file mode 100644
index 070ec30..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/conf.py
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-
-from wic.ksparser import KickStart, KickStartError
-from wic import msger
-from wic.utils import misc
-
-
-def get_siteconf():
-    wic_path = os.path.dirname(__file__)
-    eos = wic_path.find('scripts') + len('scripts')
-    scripts_path = wic_path[:eos]
-
-    return scripts_path + "/lib/image/config/wic.conf"
-
-class ConfigMgr(object):
-    DEFAULTS = {
-        'common': {
-            "distro_name": "Default Distribution",
-            "plugin_dir": "/usr/lib/wic/plugins"}, # TODO use prefix also?
-        'create': {
-            "tmpdir": '/var/tmp/wic',
-            "outdir": './wic-output',
-            "release": None,
-            "logfile": None,
-            "name_prefix": None,
-            "name_suffix": None}
-        }
-
-    # make the manager class as singleton
-    _instance = None
-    def __new__(cls, *args, **kwargs):
-        if not cls._instance:
-            cls._instance = super(ConfigMgr, cls).__new__(cls, *args, **kwargs)
-
-        return cls._instance
-
-    def __init__(self, ksconf=None, siteconf=None):
-        # reset config options
-        self.reset()
-
-        if not siteconf:
-            siteconf = get_siteconf()
-
-        # initial options from siteconf
-        self._siteconf = siteconf
-
-        if ksconf:
-            self._ksconf = ksconf
-
-    def reset(self):
-        self.__ksconf = None
-        self.__siteconf = None
-        self.create = {}
-
-        # initialize the values with defaults
-        for sec, vals in self.DEFAULTS.items():
-            setattr(self, sec, vals)
-
-    def __set_ksconf(self, ksconf):
-        if not os.path.isfile(ksconf):
-            msger.error('Cannot find ks file: %s' % ksconf)
-
-        self.__ksconf = ksconf
-        self._parse_kickstart(ksconf)
-    def __get_ksconf(self):
-        return self.__ksconf
-    _ksconf = property(__get_ksconf, __set_ksconf)
-
-    def _parse_kickstart(self, ksconf=None):
-        if not ksconf:
-            return
-
-        try:
-            ksobj = KickStart(ksconf)
-        except KickStartError as err:
-            msger.error(str(err))
-
-        self.create['ks'] = ksobj
-        self.create['name'] = os.path.splitext(os.path.basename(ksconf))[0]
-
-        self.create['name'] = misc.build_name(ksconf,
-                                              self.create['release'],
-                                              self.create['name_prefix'],
-                                              self.create['name_suffix'])
-
-configmgr = ConfigMgr()
diff --git a/import-layers/yocto-poky/scripts/lib/wic/config/wic.conf b/import-layers/yocto-poky/scripts/lib/wic/config/wic.conf
deleted file mode 100644
index a51bcb5..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/config/wic.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-[common]
-; general settings
-distro_name = OpenEmbedded
-
-[create]
-; settings for create subcommand
diff --git a/import-layers/yocto-poky/scripts/lib/wic/creator.py b/import-layers/yocto-poky/scripts/lib/wic/creator.py
deleted file mode 100644
index 8f7d150..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/creator.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os, sys
-from optparse import OptionParser, SUPPRESS_HELP
-
-from wic import msger
-from wic.utils import errors
-from wic.conf import configmgr
-from wic.plugin import pluginmgr
-
-
-class Creator():
-    """${name}: create an image
-
-    Usage:
-        ${name} SUBCOMMAND <ksfile> [OPTS]
-
-    ${command_list}
-    ${option_list}
-    """
-
-    name = 'wic create(cr)'
-
-    def __init__(self, *args, **kwargs):
-        self._subcmds = {}
-
-        # get cmds from pluginmgr
-        # mix-in do_subcmd interface
-        for subcmd, klass in pluginmgr.get_plugins('imager').items():
-            if not hasattr(klass, 'do_create'):
-                msger.warning("Unsupported subcmd: %s" % subcmd)
-                continue
-
-            func = getattr(klass, 'do_create')
-            self._subcmds[subcmd] = func
-
-    def get_optparser(self):
-        optparser = OptionParser()
-        optparser.add_option('-d', '--debug', action='store_true',
-                             dest='debug',
-                             help=SUPPRESS_HELP)
-        optparser.add_option('-v', '--verbose', action='store_true',
-                             dest='verbose',
-                             help=SUPPRESS_HELP)
-        optparser.add_option('', '--logfile', type='string', dest='logfile',
-                             default=None,
-                             help='Path of logfile')
-        optparser.add_option('-c', '--config', type='string', dest='config',
-                             default=None,
-                             help='Specify config file for wic')
-        optparser.add_option('-o', '--outdir', type='string', action='store',
-                             dest='outdir', default=None,
-                             help='Output directory')
-        optparser.add_option('', '--tmpfs', action='store_true', dest='enabletmpfs',
-                             help='Setup tmpdir as tmpfs to accelerate, experimental'
-                                  ' feature, use it if you have more than 4G memory')
-        optparser.add_option('', '--bmap', action='store_true', help='generate .bmap')
-        return optparser
-
-    def postoptparse(self, options):
-        abspath = lambda pth: os.path.abspath(os.path.expanduser(pth))
-
-        if options.verbose:
-            msger.set_loglevel('verbose')
-        if options.debug:
-            msger.set_loglevel('debug')
-
-        if options.logfile:
-            logfile_abs_path = abspath(options.logfile)
-            if os.path.isdir(logfile_abs_path):
-                raise errors.Usage("logfile's path %s should be file"
-                                   % options.logfile)
-            if not os.path.exists(os.path.dirname(logfile_abs_path)):
-                os.makedirs(os.path.dirname(logfile_abs_path))
-            msger.set_interactive(False)
-            msger.set_logfile(logfile_abs_path)
-            configmgr.create['logfile'] = options.logfile
-
-        if options.config:
-            configmgr.reset()
-            configmgr._siteconf = options.config
-
-        if options.outdir is not None:
-            configmgr.create['outdir'] = abspath(options.outdir)
-
-        cdir = 'outdir'
-        if os.path.exists(configmgr.create[cdir]) \
-           and not os.path.isdir(configmgr.create[cdir]):
-            msger.error('Invalid directory specified: %s' \
-                        % configmgr.create[cdir])
-
-        if options.enabletmpfs:
-            configmgr.create['enabletmpfs'] = options.enabletmpfs
-
-    def main(self, argv=None):
-        if argv is None:
-            argv = sys.argv
-        else:
-            argv = argv[:] # don't modify caller's list
-
-        pname = argv[0]
-        if pname not in self._subcmds:
-            msger.error('Unknown plugin: %s' % pname)
-
-        optparser = self.get_optparser()
-        options, args = optparser.parse_args(argv)
-
-        self.postoptparse(options)
-
-        return self._subcmds[pname](options, *args[1:])
diff --git a/import-layers/yocto-poky/scripts/lib/wic/engine.py b/import-layers/yocto-poky/scripts/lib/wic/engine.py
index 5b10463..f59821f 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/engine.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/engine.py
@@ -28,14 +28,14 @@
 # Tom Zanussi <tom.zanussi (at] linux.intel.com>
 #
 
+import logging
 import os
-import sys
 
-from wic import msger, creator
-from wic.utils import misc
-from wic.plugin import pluginmgr
-from wic.utils.oe import misc
+from wic import WicError
+from wic.pluginbase import PluginMgr
+from wic.utils.misc import get_bitbake_var
 
+logger = logging.getLogger('wic')
 
 def verify_build_env():
     """
@@ -44,23 +44,25 @@
     Returns True if it is, false otherwise
     """
     if not os.environ.get("BUILDDIR"):
-        print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
-        sys.exit(1)
+        raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
 
     return True
 
 
 CANNED_IMAGE_DIR = "lib/wic/canned-wks" # relative to scripts
 SCRIPTS_CANNED_IMAGE_DIR = "scripts/" + CANNED_IMAGE_DIR
+WIC_DIR = "wic"
 
 def build_canned_image_list(path):
-    layers_path = misc.get_bitbake_var("BBLAYERS")
+    layers_path = get_bitbake_var("BBLAYERS")
     canned_wks_layer_dirs = []
 
     if layers_path is not None:
         for layer_path in layers_path.split():
-            cpath = os.path.join(layer_path, SCRIPTS_CANNED_IMAGE_DIR)
-            canned_wks_layer_dirs.append(cpath)
+            for wks_path in (WIC_DIR, SCRIPTS_CANNED_IMAGE_DIR):
+                cpath = os.path.join(layer_path, wks_path)
+                if os.path.isdir(cpath):
+                    canned_wks_layer_dirs.append(cpath)
 
     cpath = os.path.join(path, CANNED_IMAGE_DIR)
     canned_wks_layer_dirs.append(cpath)
@@ -137,26 +139,24 @@
     """
     List the available source plugins i.e. plugins available for --source.
     """
-    plugins = pluginmgr.get_source_plugins()
+    plugins = PluginMgr.get_plugins('source')
 
     for plugin in plugins:
         print("  %s" % plugin)
 
 
 def wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
-               native_sysroot, scripts_path, image_output_dir,
-               compressor, bmap, debug):
-    """Create image
+               native_sysroot, options):
+    """
+    Create image
 
     wks_file - user-defined OE kickstart file
     rootfs_dir - absolute path to the build's /rootfs dir
     bootimg_dir - absolute path to the build's boot artifacts directory
     kernel_dir - absolute path to the build's kernel directory
     native_sysroot - absolute path to the build's native sysroots dir
-    scripts_path - absolute path to /scripts dir
     image_output_dir - dirname to create for image
-    compressor - compressor utility to compress the image
-    bmap - enable generation of .bmap
+    options - wic command line options (debug, bmap, etc.)
 
     Normally, the values for the build artifacts values are determined
     by 'wic -e' from the output of the 'bitbake -e' command given an
@@ -179,22 +179,22 @@
     try:
         oe_builddir = os.environ["BUILDDIR"]
     except KeyError:
-        print("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
-        sys.exit(1)
+        raise WicError("BUILDDIR not found, exiting. (Did you forget to source oe-init-build-env?)")
 
-    if debug:
-        msger.set_loglevel('debug')
+    if not os.path.exists(options.outdir):
+        os.makedirs(options.outdir)
 
-    crobj = creator.Creator()
+    pname = 'direct'
+    plugin_class = PluginMgr.get_plugins('imager').get(pname)
+    if not plugin_class:
+        raise WicError('Unknown plugin: %s' % pname)
 
-    cmdline = ["direct", native_sysroot, kernel_dir, bootimg_dir, rootfs_dir,
-                wks_file, image_output_dir, oe_builddir, compressor or ""]
-    if bmap:
-        cmdline.append('--bmap')
+    plugin = plugin_class(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+                          native_sysroot, oe_builddir, options)
 
-    crobj.main(cmdline)
+    plugin.do_create()
 
-    print("\nThe image(s) were created using OE kickstart file:\n  %s" % wks_file)
+    logger.info("The image(s) were created using OE kickstart file:\n  %s", wks_file)
 
 
 def wic_list(args, scripts_path):
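
The rewritten wic_create() above replaces the old Creator/command-line
indirection with a direct lookup in the plugin registry: the imager class is
fetched by name and its do_create() method is invoked. A minimal sketch of
that name-to-class dispatch pattern, with a stand-in registry since
PluginMgr's internals are not part of this patch:

    # Hypothetical stand-in for PluginMgr.get_plugins('imager')
    class DirectPlugin:
        def __init__(self, *args):
            self.args = args
        def do_create(self):
            print("creating image from:", self.args)

    PLUGINS = {'imager': {'direct': DirectPlugin}}

    def create_image(pname, *args):
        plugin_class = PLUGINS['imager'].get(pname)
        if not plugin_class:
            raise RuntimeError('Unknown plugin: %s' % pname)
        plugin_class(*args).do_create()

    create_image('direct', 'mkefidisk.wks')
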
@@ -214,12 +214,44 @@
         wks_file = args[0]
         fullpath = find_canned_image(scripts_path, wks_file)
         if not fullpath:
-            print("No image named %s found, exiting. "\
-                  "(Use 'wic list images' to list available images, or "\
-                  "specify a fully-qualified OE kickstart (.wks) "\
-                  "filename)\n" % wks_file)
-            sys.exit(1)
+            raise WicError("No image named %s found, exiting. "
+                           "(Use 'wic list images' to list available images, "
+                           "or specify a fully-qualified OE kickstart (.wks) "
+                           "filename)" % wks_file)
+
         list_canned_image_help(scripts_path, fullpath)
         return True
 
     return False
+
+def find_canned(scripts_path, file_name):
+    """
+    Find a file either by its path or by name in the canned files dir.
+
+    Return None if not found
+    """
+    if os.path.exists(file_name):
+        return file_name
+
+    layers_canned_wks_dir = build_canned_image_list(scripts_path)
+    for canned_wks_dir in layers_canned_wks_dir:
+        for root, dirs, files in os.walk(canned_wks_dir):
+            for fname in files:
+                if fname == file_name:
+                    fullpath = os.path.join(root, fname)
+                    return fullpath
+
+def get_custom_config(boot_file):
+    """
+    Get the custom configuration to be used for the bootloader.
+
+    Return None if the file can't be found.
+    """
+    # Get the scripts path of poky
+    scripts_path = os.path.abspath("%s/../.." % os.path.dirname(__file__))
+
+    cfg_file = find_canned(scripts_path, boot_file)
+    if cfg_file:
+        with open(cfg_file, "r") as f:
+            config = f.read()
+        return config
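
The new find_canned()/get_custom_config() helpers let bootloader source
plugins load a user-supplied configuration file either by path or by name
from the canned-wks directories. A hedged usage sketch (the file name is
hypothetical, and wic's scripts/lib directory is assumed to be importable):

    from wic.engine import get_custom_config

    # Returns the file contents as a string, or None if not found
    custom_cfg = get_custom_config("custom-grub.cfg")
    if custom_cfg:
        print(custom_cfg)
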
diff --git a/import-layers/yocto-poky/scripts/lib/wic/filemap.py b/import-layers/yocto-poky/scripts/lib/wic/filemap.py
index 162603e..1f1aacc 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/filemap.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/filemap.py
@@ -530,13 +530,16 @@
     except ErrorNotSupp:
         return FilemapSeek(image, log)
 
-def sparse_copy(src_fname, dst_fname, offset=0, skip=0):
+def sparse_copy(src_fname, dst_fname, offset=0, skip=0, api=None):
     """Efficiently copy sparse file to or into another file."""
-    fmap = filemap(src_fname)
+    if not api:
+        api = filemap
+    fmap = api(src_fname)
     try:
         dst_file = open(dst_fname, 'r+b')
     except IOError:
         dst_file = open(dst_fname, 'wb')
+        dst_file.truncate(os.path.getsize(src_fname))
 
     for first, last in fmap.get_mapped_ranges(0, fmap.blocks_cnt):
         start = first * fmap.block_size
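
sparse_copy() now accepts an optional api argument so callers can force a
particular filemap implementation instead of the autodetected one, and a
freshly created destination is pre-truncated to the source size so mapped
ranges land at their proper offsets. A short sketch, assuming FilemapSeek
can be constructed from a single path as the fallback in filemap() suggests:

    from wic.filemap import sparse_copy, FilemapSeek

    # Default: autodetect FIEMAP vs. SEEK_HOLE/SEEK_DATA support
    sparse_copy('rootfs.ext4', 'disk.img', offset=1024 * 1024)

    # Force the lseek-based implementation
    sparse_copy('rootfs.ext4', 'disk.img', api=FilemapSeek)
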
diff --git a/import-layers/yocto-poky/scripts/lib/wic/help.py b/import-layers/yocto-poky/scripts/lib/wic/help.py
index e5347ec..d6e027d 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/help.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/help.py
@@ -28,10 +28,12 @@
 import subprocess
 import logging
 
-from wic.plugin import pluginmgr, PLUGIN_TYPES
+from wic.pluginbase import PluginMgr, PLUGIN_TYPES
+
+logger = logging.getLogger('wic')
 
 def subcommand_error(args):
-    logging.info("invalid subcommand %s" % args[0])
+    logger.info("invalid subcommand %s", args[0])
 
 
 def display_help(subcommand, subcommands):
@@ -66,7 +68,7 @@
     result = wic_plugins_help
     for plugin_type in PLUGIN_TYPES:
         result += '\n\n%s PLUGINS\n\n' % plugin_type.upper()
-        for name, plugin in pluginmgr.get_plugins(plugin_type).items():
+        for name, plugin in PluginMgr.get_plugins(plugin_type).items():
             result += "\n %s plugin:\n" % name
             if plugin.__doc__:
                 result += plugin.__doc__
@@ -81,13 +83,13 @@
     Should use argparse, but has to work in 2.6.
     """
     if not args:
-        logging.error("No subcommand specified, exiting")
+        logger.error("No subcommand specified, exiting")
         parser.print_help()
         return 1
     elif args[0] == "help":
         wic_help(args, main_command_usage, subcommands)
     elif args[0] not in subcommands:
-        logging.error("Unsupported subcommand %s, exiting\n" % (args[0]))
+        logger.error("Unsupported subcommand %s, exiting\n", args[0])
         parser.print_help()
         return 1
     else:
@@ -371,12 +373,7 @@
 
     This scheme is extensible - adding more hooks is a simple matter
     of adding more plugin methods to SourcePlugin and derived classes.
-    The code that then needs to call the plugin methods uses
-    plugin.get_source_plugin_methods() to find the method(s) needed by
-    the call; this is done by filling up a dict with keys containing
-    the method names of interest - on success, these will be filled in
-    with the actual methods. Please see the implementation for
-    examples and details.
+    Please see the implementation for details.
 """
 
 wic_overview_help = """
@@ -646,6 +643,12 @@
                  not specified, the size is in MB.
                  You do not need this option if you use --source.
 
+         --fixed-size: Exact partition size. Value format is the same
+                       as for the --size option. This option cannot be
+                       specified along with --size. If partition data
+                       is larger than --fixed-size, an error will be
+                       raised when assembling the disk image.
+
          --source: This option is a wic-specific option that names the
                    source of the data that will populate the
                    partition.  The most common value for this option
@@ -684,6 +687,8 @@
            apply to partitions created using '--source rootfs' (see
            --source above).  Valid values are:
 
+             vfat
+             msdos
              ext2
              ext3
              ext4
@@ -715,17 +720,25 @@
                      partition table. It may be useful for
                      bootloaders.
 
+         --exclude-path: This option is specific to wic. It excludes the given
+                         relative path from the resulting image. If the path
+                         ends with a slash, only the content of the directory
+                         is omitted, not the directory itself. This option only
+                         has an effect with the rootfs source plugin.
+
          --extra-space: This option is specific to wic. It adds extra
                         space after the space filled by the content
                         of the partition. The final size can go
                         beyond the size specified by --size.
-                        By default, 10MB.
+                        By default, 10MB. This option cannot be used
+                        with the --fixed-size option.
 
          --overhead-factor: This option is specific to wic. The
                             size of the partition is multiplied by
                             this factor. It has to be greater than or
-                            equal to 1.
-                            The default value is 1.3.
+                            equal to 1. The default value is 1.3.
+                            This option cannot be used with the
+                            --fixed-size option.
 
          --part-type: This option is specific to wic. It specifies partition
                       type GUID for GPT partitions.
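
Taken together, the constraints documented above mean --fixed-size replaces
the --size/--extra-space/--overhead-factor trio for partitions that must
come out at an exact size. A hypothetical .wks fragment illustrating both
styles (sizes in MB, the help text's default unit):

    # Grows with content: 10MB slack, then scaled by the 1.3 overhead factor
    part / --source rootfs --fstype=ext4 --size 300 --overhead-factor 1.3

    # Exactly 64MB; wic raises an error if the content does not fit
    part /boot --source bootimg-partition --fstype=vfat --fixed-size 64
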
diff --git a/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py b/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py
deleted file mode 100644
index 1a52dd8..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/imager/baseimager.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2007 Red Hat  Inc.
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-import tempfile
-import shutil
-
-from wic import msger
-from wic.utils.errors import CreatorError
-from wic.utils import runner
-
-class BaseImageCreator():
-    """Base class for image creation.
-
-    BaseImageCreator is the simplest creator class available; it will
-    create a system image according to the supplied kickstart file.
-
-    e.g.
-
-      import wic.imgcreate as imgcreate
-      ks = imgcreate.read_kickstart("foo.ks")
-      imgcreate.ImageCreator(ks, "foo").create()
-    """
-
-    def __del__(self):
-        self.cleanup()
-
-    def __init__(self, createopts=None):
-        """Initialize an ImageCreator instance.
-
-        ks -- a pykickstart.KickstartParser instance; this instance will be
-              used to drive the install by e.g. providing the list of packages
-              to be installed, the system configuration and %post scripts
-
-        name -- a name for the image; used for e.g. image filenames or
-                filesystem labels
-        """
-
-        self.__builddir = None
-
-        self.ks = None
-        self.name = "target"
-        self.tmpdir = "/var/tmp/wic"
-        self.workdir = "/var/tmp/wic/build"
-
-        # setup tmpfs tmpdir when enabletmpfs is True
-        self.enabletmpfs = False
-
-        if createopts:
-            # Mapping table for variables that have different names.
-            optmap = {"outdir" : "destdir",
-                     }
-
-            # update setting from createopts
-            for key in createopts:
-                if key in optmap:
-                    option = optmap[key]
-                else:
-                    option = key
-                setattr(self, option, createopts[key])
-
-            self.destdir = os.path.abspath(os.path.expanduser(self.destdir))
-
-        self._dep_checks = ["ls", "bash", "cp", "echo"]
-
-        # Output image file names
-        self.outimage = []
-
-        # No ks provided when called by convertor, so skip the dependency check
-        if self.ks:
-            # If we have btrfs partition we need to check necessary tools
-            for part in self.ks.partitions:
-                if part.fstype and part.fstype == "btrfs":
-                    self._dep_checks.append("mkfs.btrfs")
-                    break
-
-        # make sure the specified tmpdir and cachedir exist
-        if not os.path.exists(self.tmpdir):
-            os.makedirs(self.tmpdir)
-
-
-    #
-    # Hooks for subclasses
-    #
-    def _create(self):
-        """Create partitions for the disk image(s)
-
-        This is the hook where subclasses may create the partitions
-        that will be assembled into disk image(s).
-
-        There is no default implementation.
-        """
-        pass
-
-    def _cleanup(self):
-        """Undo anything performed in _create().
-
-        This is the hook where subclasses must undo anything which was
-        done in _create().
-
-        There is no default implementation.
-
-        """
-        pass
-
-    #
-    # Actual implementation
-    #
-    def __ensure_builddir(self):
-        if not self.__builddir is None:
-            return
-
-        try:
-            self.workdir = os.path.join(self.tmpdir, "build")
-            if not os.path.exists(self.workdir):
-                os.makedirs(self.workdir)
-            self.__builddir = tempfile.mkdtemp(dir=self.workdir,
-                                               prefix="imgcreate-")
-        except OSError as err:
-            raise CreatorError("Failed create build directory in %s: %s" %
-                               (self.tmpdir, err))
-
-    def __setup_tmpdir(self):
-        if not self.enabletmpfs:
-            return
-
-        runner.show('mount -t tmpfs -o size=4G tmpfs %s' % self.workdir)
-
-    def __clean_tmpdir(self):
-        if not self.enabletmpfs:
-            return
-
-        runner.show('umount -l %s' % self.workdir)
-
-    def create(self):
-        """Create partitions for the disk image(s)
-
-        Create the partitions that will be assembled into disk
-        image(s).
-        """
-        self.__setup_tmpdir()
-        self.__ensure_builddir()
-
-        self._create()
-
-    def cleanup(self):
-        """Undo anything performed in create().
-
-        Note, make sure to call this method once finished with the creator
-        instance in order to ensure no stale files are left on the host e.g.:
-
-          creator = ImageCreator(ks, name)
-          try:
-              creator.create()
-          finally:
-              creator.cleanup()
-
-        """
-        if not self.__builddir:
-            return
-
-        self._cleanup()
-
-        shutil.rmtree(self.__builddir, ignore_errors=True)
-        self.__builddir = None
-
-        self.__clean_tmpdir()
-
-
-    def print_outimage_info(self):
-        msg = "The new image can be found here:\n"
-        self.outimage.sort()
-        for path in self.outimage:
-            msg += '  %s\n' % os.path.abspath(path)
-
-        msger.info(msg)
diff --git a/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py b/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py
deleted file mode 100644
index 4c547e0..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/imager/direct.py
+++ /dev/null
@@ -1,419 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# DESCRIPTION
-# This implements the 'direct' image creator class for 'wic'
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
-
-import os
-import shutil
-import uuid
-
-from wic import msger
-from wic.utils.oe.misc import get_bitbake_var
-from wic.utils.partitionedfs import Image
-from wic.utils.errors import CreatorError, ImageError
-from wic.imager.baseimager import BaseImageCreator
-from wic.plugin import pluginmgr
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd
-
-disk_methods = {
-    "do_install_disk":None,
-}
-
-class DiskImage():
-    """
-    A Disk backed by a file.
-    """
-    def __init__(self, device, size):
-        self.size = size
-        self.device = device
-        self.created = False
-
-    def exists(self):
-        return os.path.exists(self.device)
-
-    def create(self):
-        if self.created:
-            return
-        # create sparse disk image
-        cmd = "truncate %s -s %s" % (self.device, self.size)
-        exec_cmd(cmd)
-        self.created = True
-
-class DirectImageCreator(BaseImageCreator):
-    """
-    Installs a system into a file containing a partitioned disk image.
-
-    DirectImageCreator is an advanced ImageCreator subclass; an image
-    file is formatted with a partition table, each partition created
-    from a rootfs or other OpenEmbedded build artifact and dd'ed into
-    the virtual disk. The disk image can subsequently be dd'ed onto
-    media and used on actual hardware.
-    """
-
-    def __init__(self, oe_builddir, image_output_dir, rootfs_dir, bootimg_dir,
-                 kernel_dir, native_sysroot, compressor, creatoropts=None,
-                 bmap=False):
-        """
-        Initialize a DirectImageCreator instance.
-
-        This method takes the same arguments as ImageCreator.__init__()
-        """
-        BaseImageCreator.__init__(self, creatoropts)
-
-        self.__image = None
-        self.__disks = {}
-        self.__disk_format = "direct"
-        self._disk_names = []
-        self.ptable_format = self.ks.bootloader.ptable
-
-        self.oe_builddir = oe_builddir
-        if image_output_dir:
-            self.tmpdir = image_output_dir
-        self.rootfs_dir = rootfs_dir
-        self.bootimg_dir = bootimg_dir
-        self.kernel_dir = kernel_dir
-        self.native_sysroot = native_sysroot
-        self.compressor = compressor
-        self.bmap = bmap
-
-    def __get_part_num(self, num, parts):
-        """calculate the real partition number, accounting for partitions not
-        in the partition table and logical partitions
-        """
-        realnum = 0
-        for pnum, part in enumerate(parts, 1):
-            if not part.no_table:
-                realnum += 1
-            if pnum == num:
-                if  part.no_table:
-                    return 0
-                if self.ptable_format == 'msdos' and realnum > 3 and len(parts) > 4:
-                    # account for logical partition numbering, ex. sda5..
-                    return realnum + 1
-                return realnum
-
-    def _write_fstab(self, image_rootfs):
-        """overriden to generate fstab (temporarily) in rootfs. This is called
-        from _create, make sure it doesn't get called from
-        BaseImage.create()
-        """
-        if not image_rootfs:
-            return
-
-        fstab_path = image_rootfs + "/etc/fstab"
-        if not os.path.isfile(fstab_path):
-            return
-
-        with open(fstab_path) as fstab:
-            fstab_lines = fstab.readlines()
-
-        if self._update_fstab(fstab_lines, self._get_parts()):
-            shutil.copyfile(fstab_path, fstab_path + ".orig")
-
-            with open(fstab_path, "w") as fstab:
-                fstab.writelines(fstab_lines)
-
-            return fstab_path
-
-    def _update_fstab(self, fstab_lines, parts):
-        """Assume partition order same as in wks"""
-        updated = False
-        for num, part in enumerate(parts, 1):
-            pnum = self.__get_part_num(num, parts)
-            if not pnum or not part.mountpoint \
-               or part.mountpoint in ("/", "/boot"):
-                continue
-
-            # mmc device partitions are named mmcblk0p1, mmcblk0p2..
-            prefix = 'p' if  part.disk.startswith('mmcblk') else ''
-            device_name = "/dev/%s%s%d" % (part.disk, prefix, pnum)
-
-            opts = part.fsopts if part.fsopts else "defaults"
-            line = "\t".join([device_name, part.mountpoint, part.fstype,
-                              opts, "0", "0"]) + "\n"
-
-            fstab_lines.append(line)
-            updated = True
-
-        return updated
-
-    def set_bootimg_dir(self, bootimg_dir):
-        """
-        Accessor for bootimg_dir, the actual location used for the source
-        of the bootimg.  Should be set by source plugins (only if they
-        change the default bootimg source) so the correct info gets
-        displayed for print_outimage_info().
-        """
-        self.bootimg_dir = bootimg_dir
-
-    def _get_parts(self):
-        if not self.ks:
-            raise CreatorError("Failed to get partition info, "
-                               "please check your kickstart setting.")
-
-        # Set a default partition if no partition is given out
-        if not self.ks.partitions:
-            partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
-            args = partstr.split()
-            part = self.ks.parse(args[1:])
-            if part not in self.ks.partitions:
-                self.ks.partitions.append(part)
-
-        # partitions list from kickstart file
-        return self.ks.partitions
-
-    def get_disk_names(self):
-        """ Returns a list of physical target disk names (e.g., 'sdb') which
-        will be created. """
-
-        if self._disk_names:
-            return self._disk_names
-
-        #get partition info from ks handler
-        parts = self._get_parts()
-
-        for i in range(len(parts)):
-            if parts[i].disk:
-                disk_name = parts[i].disk
-            else:
-                raise CreatorError("Failed to create disks, no --ondisk "
-                                   "specified in partition line of ks file")
-
-            if parts[i].mountpoint and not parts[i].fstype:
-                raise CreatorError("Failed to create disks, no --fstype "
-                                   "specified for partition with mountpoint "
-                                   "'%s' in the ks file")
-
-            self._disk_names.append(disk_name)
-
-        return self._disk_names
-
-    def _full_name(self, name, extention):
-        """ Construct full file name for a file we generate. """
-        return "%s-%s.%s" % (self.name, name, extention)
-
-    def _full_path(self, path, name, extention):
-        """ Construct full file path to a file we generate. """
-        return os.path.join(path, self._full_name(name, extention))
-
-    def get_default_source_plugin(self):
-        """
-        The default source plugin i.e. the plugin that's consulted for
-        overall image generation tasks outside of any particular
-        partition.  For convenience, we just hang it off the
-        bootloader handler since it's the one non-partition object in
-        any setup.  By default the default plugin is set to the same
-        plugin as the /boot partition; since we hang it off the
-        bootloader object, the default can be explicitly set using the
-        --source bootloader param.
-        """
-        return self.ks.bootloader.source
-
-    #
-    # Actual implemention
-    #
-    def _create(self):
-        """
-        For 'wic', we already have our build artifacts - we just create
-        filesystems from the artifacts directly and combine them into
-        a partitioned image.
-        """
-        parts = self._get_parts()
-
-        self.__image = Image(self.native_sysroot)
-
-        disk_ids = {}
-        for num, part in enumerate(parts, 1):
-            # as a convenience, set source to the boot partition source
-            # instead of forcing it to be set via bootloader --source
-            if not self.ks.bootloader.source and part.mountpoint == "/boot":
-                self.ks.bootloader.source = part.source
-
-            # generate parition UUIDs
-            if not part.uuid and part.use_uuid:
-                if self.ptable_format == 'gpt':
-                    part.uuid = str(uuid.uuid4())
-                else: # msdos partition table
-                    if part.disk not in disk_ids:
-                        disk_ids[part.disk] = int.from_bytes(os.urandom(4), 'little')
-                    disk_id = disk_ids[part.disk]
-                    part.uuid = '%0x-%02d' % (disk_id, self.__get_part_num(num, parts))
-
-        fstab_path = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
-
-        shutil.rmtree(self.workdir)
-        os.mkdir(self.workdir)
-
-        for part in parts:
-            # get rootfs size from bitbake variable if it's not set in .ks file
-            if not part.size:
-                # and if rootfs name is specified for the partition
-                image_name = part.rootfs_dir
-                if image_name:
-                    # Bitbake variable ROOTFS_SIZE is calculated in
-                    # Image._get_rootfs_size method from meta/lib/oe/image.py
-                    # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
-                    # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
-                    rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
-                    if rsize_bb:
-                        part.size = int(round(float(rsize_bb)))
-            # need to create the filesystems in order to get their
-            # sizes before we can add them and do the layout.
-            # Image.create() actually calls __format_disks() to create
-            # the disk images and carve out the partitions, then
-            # self.assemble() calls Image.assemble() which calls
-            # __write_partitition() for each partition to dd the fs
-            # into the partitions.
-            part.prepare(self, self.workdir, self.oe_builddir, self.rootfs_dir,
-                         self.bootimg_dir, self.kernel_dir, self.native_sysroot)
-
-
-            self.__image.add_partition(int(part.size),
-                                       part.disk,
-                                       part.mountpoint,
-                                       part.source_file,
-                                       part.fstype,
-                                       part.label,
-                                       fsopts=part.fsopts,
-                                       boot=part.active,
-                                       align=part.align,
-                                       no_table=part.no_table,
-                                       part_type=part.part_type,
-                                       uuid=part.uuid,
-                                       system_id=part.system_id)
-
-        if fstab_path:
-            shutil.move(fstab_path + ".orig", fstab_path)
-
-        self.__image.layout_partitions(self.ptable_format)
-
-        self.__imgdir = self.workdir
-        for disk_name, disk in self.__image.disks.items():
-            full_path = self._full_path(self.__imgdir, disk_name, "direct")
-            msger.debug("Adding disk %s as %s with size %s bytes" \
-                        % (disk_name, full_path, disk['min_size']))
-            disk_obj = DiskImage(full_path, disk['min_size'])
-            self.__disks[disk_name] = disk_obj
-            self.__image.add_disk(disk_name, disk_obj, disk_ids.get(disk_name))
-
-        self.__image.create()
-
-    def assemble(self):
-        """
-        Assemble partitions into disk image(s)
-        """
-        for disk_name, disk in self.__image.disks.items():
-            full_path = self._full_path(self.__imgdir, disk_name, "direct")
-            msger.debug("Assembling disk %s as %s with size %s bytes" \
-                        % (disk_name, full_path, disk['min_size']))
-            self.__image.assemble(full_path)
-
-    def finalize(self):
-        """
-        Finalize the disk image.
-
-        For example, prepare the image to be bootable by e.g.
-        creating and installing a bootloader configuration.
-
-        """
-        source_plugin = self.get_default_source_plugin()
-        if source_plugin:
-            self._source_methods = pluginmgr.get_source_plugin_methods(source_plugin, disk_methods)
-            for disk_name, disk in self.__image.disks.items():
-                self._source_methods["do_install_disk"](disk, disk_name, self,
-                                                        self.workdir,
-                                                        self.oe_builddir,
-                                                        self.bootimg_dir,
-                                                        self.kernel_dir,
-                                                        self.native_sysroot)
-
-        for disk_name, disk in self.__image.disks.items():
-            full_path = self._full_path(self.__imgdir, disk_name, "direct")
-            # Generate .bmap
-            if self.bmap:
-                msger.debug("Generating bmap file for %s" % disk_name)
-                exec_native_cmd("bmaptool create %s -o %s.bmap" % (full_path, full_path),
-                                self.native_sysroot)
-            # Compress the image
-            if self.compressor:
-                msger.debug("Compressing disk %s with %s" % (disk_name, self.compressor))
-                exec_cmd("%s %s" % (self.compressor, full_path))
-
-    def print_outimage_info(self):
-        """
-        Print the image(s) and artifacts used, for the user.
-        """
-        msg = "The new image(s) can be found here:\n"
-
-        parts = self._get_parts()
-
-        for disk_name in self.__image.disks:
-            extension = "direct" + {"gzip": ".gz",
-                                    "bzip2": ".bz2",
-                                    "xz": ".xz",
-                                    "": ""}.get(self.compressor)
-            full_path = self._full_path(self.__imgdir, disk_name, extension)
-            msg += '  %s\n\n' % full_path
-
-        msg += 'The following build artifacts were used to create the image(s):\n'
-        for part in parts:
-            if part.rootfs_dir is None:
-                continue
-            if part.mountpoint == '/':
-                suffix = ':'
-            else:
-                suffix = '["%s"]:' % (part.mountpoint or part.label)
-            msg += '  ROOTFS_DIR%s%s\n' % (suffix.ljust(20), part.rootfs_dir)
-
-        msg += '  BOOTIMG_DIR:                  %s\n' % self.bootimg_dir
-        msg += '  KERNEL_DIR:                   %s\n' % self.kernel_dir
-        msg += '  NATIVE_SYSROOT:               %s\n' % self.native_sysroot
-
-        msger.info(msg)
-
-    @property
-    def rootdev(self):
-        """
-        Get root device name to use as a 'root' parameter
-        in kernel command line.
-
-        Assume partition order same as in wks
-        """
-        parts = self._get_parts()
-        for num, part in enumerate(parts, 1):
-            if part.mountpoint == "/":
-                if part.uuid:
-                    return "PARTUUID=%s" % part.uuid
-                else:
-                    suffix = 'p' if part.disk.startswith('mmcblk') else ''
-                    pnum = self.__get_part_num(num, parts)
-                    return "/dev/%s%s%-d" % (part.disk, suffix, pnum)
-
-    def _cleanup(self):
-        if not self.__image is None:
-            try:
-                self.__image.cleanup()
-            except ImageError as err:
-                msger.warning("%s" % err)
-
diff --git a/import-layers/yocto-poky/scripts/lib/wic/ksparser.py b/import-layers/yocto-poky/scripts/lib/wic/ksparser.py
index 0894e2b..d026caa 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/ksparser.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/ksparser.py
@@ -27,11 +27,14 @@
 
 import os
 import shlex
+import logging
+
 from argparse import ArgumentParser, ArgumentError, ArgumentTypeError
 
-from wic import msger
+from wic.engine import find_canned
 from wic.partition import Partition
-from wic.utils.misc import find_canned
+
+logger = logging.getLogger('wic')
 
 class KickStartError(Exception):
     """Custom exception."""
@@ -113,6 +116,9 @@
 class KickStart():
     """"Kickstart parser implementation."""
 
+    DEFAULT_EXTRA_SPACE = 10*1024
+    DEFAULT_OVERHEAD_FACTOR = 1.3
+
     def __init__(self, confpath):
 
         self.partitions = []
@@ -127,16 +133,27 @@
         part.add_argument('mountpoint', nargs='?')
         part.add_argument('--active', action='store_true')
         part.add_argument('--align', type=int)
-        part.add_argument("--extra-space", type=sizetype, default=10*1024)
+        part.add_argument('--exclude-path', nargs='+')
+        part.add_argument("--extra-space", type=sizetype)
         part.add_argument('--fsoptions', dest='fsopts')
-        part.add_argument('--fstype')
+        part.add_argument('--fstype', default='vfat',
+                          choices=('ext2', 'ext3', 'ext4', 'btrfs',
+                                   'squashfs', 'vfat', 'msdos', 'swap'))
         part.add_argument('--label')
         part.add_argument('--no-table', action='store_true')
-        part.add_argument('--ondisk', '--ondrive', dest='disk')
-        part.add_argument("--overhead-factor", type=overheadtype, default=1.3)
+        part.add_argument('--ondisk', '--ondrive', dest='disk', default='sda')
+        part.add_argument("--overhead-factor", type=overheadtype)
         part.add_argument('--part-type')
         part.add_argument('--rootfs-dir')
-        part.add_argument('--size', type=sizetype, default=0)
+
+        # --size and --fixed-size cannot be specified together; options
+        # --extra-space and --overhead-factor should also raise a parser
+        # error, but since nesting mutually exclusive groups does not work,
+        # --extra-space/--overhead-factor are handled later
+        sizeexcl = part.add_mutually_exclusive_group()
+        sizeexcl.add_argument('--size', type=sizetype, default=0)
+        sizeexcl.add_argument('--fixed-size', type=sizetype, default=0)
+
         part.add_argument('--source')
         part.add_argument('--sourceparams')
         part.add_argument('--system-id', type=systemidtype)
@@ -156,7 +173,7 @@
 
         self._parse(parser, confpath)
         if not self.bootloader:
-            msger.warning('bootloader config not specified, using defaults')
+            logger.warning('bootloader config not specified, using defaults\n')
             self.bootloader = bootloader.parse_args([])
 
     def _parse(self, parser, confpath):
@@ -170,11 +187,33 @@
                 lineno += 1
                 if line and line[0] != '#':
                     try:
-                        parsed = parser.parse_args(shlex.split(line))
+                        line_args = shlex.split(line)
+                        parsed = parser.parse_args(line_args)
                     except ArgumentError as err:
                         raise KickStartError('%s:%d: %s' % \
                                              (confpath, lineno, err))
                     if line.startswith('part'):
+                        # with ArgumentParser one cannot easily tell whether
+                        # an option that has a default value was actually
+                        # passed; --overhead-factor/--extra-space cannot be
+                        # used with --fixed-size, so at least detect when
+                        # these were passed with non-zero values ...
+                        if parsed.fixed_size:
+                            if parsed.overhead_factor or parsed.extra_space:
+                                err = "%s:%d: arguments --overhead-factor and --extra-space not "\
+                                      "allowed with argument --fixed-size" \
+                                      % (confpath, lineno)
+                                raise KickStartError(err)
+                        else:
+                            # ... and fall back to the defaults when not
+                            # using --fixed-size, but only if the option was
+                            # not on the line (again, one cannot tell whether
+                            # an option was passed with a value equal to 0)
+                            if '--overhead-factor' not in line_args:
+                                parsed.overhead_factor = self.DEFAULT_OVERHEAD_FACTOR
+                            if '--extra-space' not in line_args:
+                                parsed.extra_space = self.DEFAULT_EXTRA_SPACE
+
                         self.partnum += 1
                         self.partitions.append(Partition(parsed, self.partnum))
                     elif line.startswith('include'):
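
The mutually exclusive group above only lets argparse police --size against
--fixed-size; the --extra-space/--overhead-factor conflicts have to be
caught by hand in _parse() because argparse groups do not nest. A small
self-contained sketch of the same two-stage validation:

    from argparse import ArgumentParser

    parser = ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--size', type=int, default=0)
    group.add_argument('--fixed-size', type=int, default=0)
    parser.add_argument('--extra-space', type=int)

    # argparse itself rejects "--size 100 --fixed-size 200"; the
    # cross-group conflict must be detected after parsing:
    args = parser.parse_args(['--fixed-size', '200', '--extra-space', '10'])
    if args.fixed_size and args.extra_space is not None:
        raise SystemExit('--extra-space not allowed with --fixed-size')
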
diff --git a/import-layers/yocto-poky/scripts/lib/wic/msger.py b/import-layers/yocto-poky/scripts/lib/wic/msger.py
deleted file mode 100644
index fb8336d..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/msger.py
+++ /dev/null
@@ -1,235 +0,0 @@
-#!/usr/bin/env python -tt
-# vim: ai ts=4 sts=4 et sw=4
-#
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-import sys
-import re
-import time
-
-__ALL__ = ['get_loglevel',
-           'set_loglevel',
-           'set_logfile',
-           'debug',
-           'verbose',
-           'info',
-           'warning',
-           'error',
-          ]
-
-# COLORs in ANSI
-INFO_COLOR = 32 # green
-WARN_COLOR = 33 # yellow
-ERR_COLOR = 31 # red
-ASK_COLOR = 34 # blue
-NO_COLOR = 0
-
-PREFIX_RE = re.compile('^<(.*?)>\s*(.*)', re.S)
-
-INTERACTIVE = True
-
-LOG_LEVEL = 1
-LOG_LEVELS = {
-    'quiet': 0,
-    'normal': 1,
-    'verbose': 2,
-    'debug': 3,
-    'never': 4,
-}
-
-LOG_FILE_FP = None
-LOG_CONTENT = ''
-CATCHERR_BUFFILE_FD = -1
-CATCHERR_BUFFILE_PATH = None
-CATCHERR_SAVED_2 = -1
-
-def _general_print(head, color, msg=None, stream=None, level='normal'):
-    global LOG_CONTENT
-    if not stream:
-        stream = sys.stdout
-
-    if LOG_LEVELS[level] > LOG_LEVEL:
-        # skip
-        return
-
-    errormsg = ''
-    if CATCHERR_BUFFILE_FD > 0:
-        size = os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_END)
-        os.lseek(CATCHERR_BUFFILE_FD, 0, os.SEEK_SET)
-        errormsg = os.read(CATCHERR_BUFFILE_FD, size)
-        os.ftruncate(CATCHERR_BUFFILE_FD, 0)
-
-    # append error msg to LOG
-    if errormsg:
-        LOG_CONTENT += errormsg
-
-    # append normal msg to LOG
-    save_msg = msg.strip() if msg else None
-    if save_msg:
-        timestr = time.strftime("[%m/%d %H:%M:%S %Z] ", time.localtime())
-        LOG_CONTENT += timestr + save_msg + '\n'
-
-    if errormsg:
-        _color_print('', NO_COLOR, errormsg, stream, level)
-
-    _color_print(head, color, msg, stream, level)
-
-def _color_print(head, color, msg, stream, level):
-    colored = True
-    if color == NO_COLOR or \
-       not stream.isatty() or \
-       os.getenv('ANSI_COLORS_DISABLED') is not None:
-        colored = False
-
-    if head.startswith('\r'):
-        # need not \n at last
-        newline = False
-    else:
-        newline = True
-
-    if colored:
-        head = '\033[%dm%s:\033[0m ' %(color, head)
-        if not newline:
-            # ESC cmd to clear line
-            head = '\033[2K' + head
-    else:
-        if head:
-            head += ': '
-            if head.startswith('\r'):
-                head = head.lstrip()
-                newline = True
-
-    if msg is not None:
-        stream.write('%s%s' % (head, msg))
-        if newline:
-            stream.write('\n')
-
-    stream.flush()
-
-def _color_perror(head, color, msg, level='normal'):
-    if CATCHERR_BUFFILE_FD > 0:
-        _general_print(head, color, msg, sys.stdout, level)
-    else:
-        _general_print(head, color, msg, sys.stderr, level)
-
-def _split_msg(head, msg):
-    if isinstance(msg, list):
-        msg = '\n'.join(map(str, msg))
-
-    if msg.startswith('\n'):
-        # means print \n at first
-        msg = msg.lstrip()
-        head = '\n' + head
-
-    elif msg.startswith('\r'):
-        # means print \r at first
-        msg = msg.lstrip()
-        head = '\r' + head
-
-    match = PREFIX_RE.match(msg)
-    if match:
-        head += ' <%s>' % match.group(1)
-        msg = match.group(2)
-
-    return head, msg
-
-def get_loglevel():
-    return next((k for k, v in LOG_LEVELS.items() if v == LOG_LEVEL))
-
-def set_loglevel(level):
-    global LOG_LEVEL
-    if level not in LOG_LEVELS:
-        # no effect
-        return
-
-    LOG_LEVEL = LOG_LEVELS[level]
-
-def set_interactive(mode=True):
-    global INTERACTIVE
-    if mode:
-        INTERACTIVE = True
-    else:
-        INTERACTIVE = False
-
-def log(msg=''):
-    # log msg to LOG_CONTENT then save to logfile
-    global LOG_CONTENT
-    if msg:
-        LOG_CONTENT += msg
-
-def info(msg):
-    head, msg = _split_msg('Info', msg)
-    _general_print(head, INFO_COLOR, msg)
-
-def verbose(msg):
-    head, msg = _split_msg('Verbose', msg)
-    _general_print(head, INFO_COLOR, msg, level='verbose')
-
-def warning(msg):
-    head, msg = _split_msg('Warning', msg)
-    _color_perror(head, WARN_COLOR, msg)
-
-def debug(msg):
-    head, msg = _split_msg('Debug', msg)
-    _color_perror(head, ERR_COLOR, msg, level='debug')
-
-def error(msg):
-    head, msg = _split_msg('Error', msg)
-    _color_perror(head, ERR_COLOR, msg)
-    sys.exit(1)
-
-def set_logfile(fpath):
-    global LOG_FILE_FP
-
-    def _savelogf():
-        if LOG_FILE_FP:
-            with open(LOG_FILE_FP, 'w') as log:
-                log.write(LOG_CONTENT)
-
-    if LOG_FILE_FP is not None:
-        warning('duplicate log file configuration')
-
-    LOG_FILE_FP = fpath
-
-    import atexit
-    atexit.register(_savelogf)
-
-def enable_logstderr(fpath):
-    global CATCHERR_BUFFILE_FD
-    global CATCHERR_BUFFILE_PATH
-    global CATCHERR_SAVED_2
-
-    if os.path.exists(fpath):
-        os.remove(fpath)
-    CATCHERR_BUFFILE_PATH = fpath
-    CATCHERR_BUFFILE_FD = os.open(CATCHERR_BUFFILE_PATH, os.O_RDWR|os.O_CREAT)
-    CATCHERR_SAVED_2 = os.dup(2)
-    os.dup2(CATCHERR_BUFFILE_FD, 2)
-
-def disable_logstderr():
-    global CATCHERR_BUFFILE_FD
-    global CATCHERR_BUFFILE_PATH
-    global CATCHERR_SAVED_2
-
-    raw(msg=None) # flush message buffer and print it.
-    os.dup2(CATCHERR_SAVED_2, 2)
-    os.close(CATCHERR_SAVED_2)
-    os.close(CATCHERR_BUFFILE_FD)
-    os.unlink(CATCHERR_BUFFILE_PATH)
-    CATCHERR_BUFFILE_FD = -1
-    CATCHERR_BUFFILE_PATH = None
-    CATCHERR_SAVED_2 = -1
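
With msger.py removed, its ad-hoc log levels, ANSI coloring, and stderr
capture are all replaced by the standard logging module, with every wic
module sharing the 'wic' logger. A minimal sketch of the replacement wiring,
assuming the top-level wic script performs the one-time configuration:

    import logging

    # Once, in the entry point (assumption: wic does roughly this)
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    # In each module, replacing "from wic import msger"
    logger = logging.getLogger('wic')
    logger.info('Using OE kickstart file: %s', 'mkefidisk.wks')
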
diff --git a/import-layers/yocto-poky/scripts/lib/wic/partition.py b/import-layers/yocto-poky/scripts/lib/wic/partition.py
index ec3aa66..939e667 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/partition.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/partition.py
@@ -24,18 +24,15 @@
 # Tom Zanussi <tom.zanussi (at] linux.intel.com>
 # Ed Bartosh <ed.bartosh> (at] linux.intel.com>
 
+import logging
 import os
 import tempfile
 
-from wic.utils.oe.misc import msger, parse_sourceparams
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd
-from wic.plugin import pluginmgr
+from wic import WicError
+from wic.utils.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+from wic.pluginbase import PluginMgr
 
-partition_methods = {
-    "do_stage_partition":None,
-    "do_prepare_partition":None,
-    "do_configure_partition":None,
-}
+logger = logging.getLogger('wic')
 
 class Partition():
 
@@ -44,16 +41,20 @@
         self.active = args.active
         self.align = args.align
         self.disk = args.disk
+        self.device = None
         self.extra_space = args.extra_space
+        self.exclude_path = args.exclude_path
         self.fsopts = args.fsopts
         self.fstype = args.fstype
         self.label = args.label
         self.mountpoint = args.mountpoint
         self.no_table = args.no_table
+        self.num = None
         self.overhead_factor = args.overhead_factor
         self.part_type = args.part_type
         self.rootfs_dir = args.rootfs_dir
         self.size = args.size
+        self.fixed_size = args.fixed_size
         self.source = args.source
         self.sourceparams = args.sourceparams
         self.system_id = args.system_id
@@ -71,100 +72,128 @@
         number of (1k) blocks we need to add to get to --size, 0 if
         we're already there or beyond.
         """
-        msger.debug("Requested partition size for %s: %d" % \
-                    (self.mountpoint, self.size))
+        logger.debug("Requested partition size for %s: %d",
+                     self.mountpoint, self.size)
 
         if not self.size:
             return 0
 
         requested_blocks = self.size
 
-        msger.debug("Requested blocks %d, current_blocks %d" % \
-                    (requested_blocks, current_blocks))
+        logger.debug("Requested blocks %d, current_blocks %d",
+                     requested_blocks, current_blocks)
 
         if requested_blocks > current_blocks:
             return requested_blocks - current_blocks
         else:
             return 0
 
+    def get_rootfs_size(self, actual_rootfs_size=0):
+        """
+        Calculate the required size of rootfs taking into consideration
+        --size/--fixed-size flags as well as overhead and extra space, as
+        specified in the kickstart file. Raises an error if
+        `actual_rootfs_size` is larger than the fixed-size rootfs.
+
+        """
+        if self.fixed_size:
+            rootfs_size = self.fixed_size
+            if actual_rootfs_size > rootfs_size:
+                raise WicError("Actual rootfs size (%d kB) is larger than "
+                               "allowed size %d kB" %
+                               (actual_rootfs_size, rootfs_size))
+        else:
+            extra_blocks = self.get_extra_block_count(actual_rootfs_size)
+            if extra_blocks < self.extra_space:
+                extra_blocks = self.extra_space
+
+            rootfs_size = actual_rootfs_size + extra_blocks
+            rootfs_size *= self.overhead_factor
+
+            logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+                         extra_blocks, self.mountpoint, rootfs_size)
+
+        return rootfs_size
+
+    @property
+    def disk_size(self):
+        """
+        Obtain on-disk size of partition taking into consideration
+        --size/--fixed-size options.
+
+        """
+        return self.fixed_size if self.fixed_size else self.size
+
     def prepare(self, creator, cr_workdir, oe_builddir, rootfs_dir,
                 bootimg_dir, kernel_dir, native_sysroot):
         """
         Prepare content for individual partitions, depending on
         partition command parameters.
         """
-        if self.sourceparams:
-            self.sourceparams_dict = parse_sourceparams(self.sourceparams)
-
         if not self.source:
-            if not self.size:
-                msger.error("The %s partition has a size of zero.  Please "
-                            "specify a non-zero --size for that partition." % \
-                            self.mountpoint)
-            if self.fstype and self.fstype == "swap":
+            if not self.size and not self.fixed_size:
+                raise WicError("The %s partition has a size of zero. Please "
+                               "specify a non-zero --size/--fixed-size for that "
+                               "partition." % self.mountpoint)
+
+            if self.fstype == "swap":
                 self.prepare_swap_partition(cr_workdir, oe_builddir,
                                             native_sysroot)
                 self.source_file = "%s/fs.%s" % (cr_workdir, self.fstype)
-            elif self.fstype:
+            else:
+                if self.fstype == 'squashfs':
+                    raise WicError("It's not possible to create empty squashfs "
+                                   "partition '%s'" % (self.mountpoint))
+
                 rootfs = "%s/fs_%s.%s.%s" % (cr_workdir, self.label,
                                              self.lineno, self.fstype)
                 if os.path.isfile(rootfs):
                     os.remove(rootfs)
-                for prefix in ("ext", "btrfs", "vfat", "squashfs"):
-                    if self.fstype.startswith(prefix):
-                        method = getattr(self,
-                                         "prepare_empty_partition_" + prefix)
-                        method(rootfs, oe_builddir, native_sysroot)
-                        self.source_file = rootfs
-                        break
+
+                prefix = "ext" if self.fstype.startswith("ext") else self.fstype
+                method = getattr(self, "prepare_empty_partition_" + prefix)
+                method(rootfs, oe_builddir, native_sysroot)
+                self.source_file = rootfs
             return
 
-        plugins = pluginmgr.get_source_plugins()
+        plugins = PluginMgr.get_plugins('source')
 
         if self.source not in plugins:
-            msger.error("The '%s' --source specified for %s doesn't exist.\n\t"
-                        "See 'wic list source-plugins' for a list of available"
-                        " --sources.\n\tSee 'wic help source-plugins' for "
-                        "details on adding a new source plugin." % \
-                        (self.source, self.mountpoint))
+            raise WicError("The '%s' --source specified for %s doesn't exist.\n\t"
+                           "See 'wic list source-plugins' for a list of available"
+                           " --sources.\n\tSee 'wic help source-plugins' for "
+                           "details on adding a new source plugin." %
+                           (self.source, self.mountpoint))
 
-        self._source_methods = pluginmgr.get_source_plugin_methods(\
-                                   self.source, partition_methods)
-        self._source_methods["do_configure_partition"](self, self.sourceparams_dict,
-                                                       creator, cr_workdir,
-                                                       oe_builddir,
-                                                       bootimg_dir,
-                                                       kernel_dir,
-                                                       native_sysroot)
-        self._source_methods["do_stage_partition"](self, self.sourceparams_dict,
-                                                   creator, cr_workdir,
-                                                   oe_builddir,
-                                                   bootimg_dir, kernel_dir,
-                                                   native_sysroot)
-        self._source_methods["do_prepare_partition"](self, self.sourceparams_dict,
-                                                     creator, cr_workdir,
-                                                     oe_builddir,
-                                                     bootimg_dir, kernel_dir, rootfs_dir,
-                                                     native_sysroot)
+        srcparams_dict = {}
+        if self.sourceparams:
+            # Split sourceparams string of the form key1=val1[,key2=val2,...]
+            # into a dict.  Also accepts valueless keys i.e. without =
+            splitted = self.sourceparams.split(',')
+            srcparams_dict = dict(par.split('=') for par in splitted if par)
+
+        plugin = PluginMgr.get_plugins('source')[self.source]
+        plugin.do_configure_partition(self, srcparams_dict, creator,
+                                      cr_workdir, oe_builddir, bootimg_dir,
+                                      kernel_dir, native_sysroot)
+        plugin.do_stage_partition(self, srcparams_dict, creator,
+                                  cr_workdir, oe_builddir, bootimg_dir,
+                                  kernel_dir, native_sysroot)
+        plugin.do_prepare_partition(self, srcparams_dict, creator,
+                                    cr_workdir, oe_builddir, bootimg_dir,
+                                    kernel_dir, rootfs_dir, native_sysroot)
+
         # further processing required Partition.size to be an integer, make
         # sure that it is one
-        if type(self.size) is not int:
-            msger.error("Partition %s internal size is not an integer. " \
-                          "This a bug in source plugin %s and needs to be fixed." \
-                          % (self.mountpoint, self.source))
+        if not isinstance(self.size, int):
+            raise WicError("Partition %s internal size is not an integer. "
+                           "This a bug in source plugin %s and needs to be fixed." %
+                           (self.mountpoint, self.source))
 
-    def prepare_rootfs_from_fs_image(self, cr_workdir, oe_builddir,
-                                     rootfs_dir):
-        """
-        Handle an already-created partition e.g. xxx.ext3
-        """
-        rootfs = oe_builddir
-        du_cmd = "du -Lbks %s" % rootfs
-        out = exec_cmd(du_cmd)
-        rootfs_size = out.split()[0]
-
-        self.size = int(rootfs_size)
-        self.source_file = rootfs
+        if self.fixed_size and self.size > self.fixed_size:
+            raise WicError("File system image of partition %s is "
+                           "larger (%d kB) than its allowed size %d kB" %
+                           (self.mountpoint, self.size, self.fixed_size))
 
     def prepare_rootfs(self, cr_workdir, oe_builddir, rootfs_dir,
                        native_sysroot):
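
As a worked example of get_rootfs_size() above, using hypothetical numbers
and the defaults from ksparser.py: a 100000-block rootfs without --fixed-size
gets the larger of the computed and default extra space, then the overhead
factor is applied; with --fixed-size 50000 the same rootfs raises WicError.

    actual = 100000                    # du -ks output, in 1k blocks
    extra = 10 * 1024                  # DEFAULT_EXTRA_SPACE
    factor = 1.3                       # DEFAULT_OVERHEAD_FACTOR
    print((actual + extra) * factor)   # 143312.0 blocks requested
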
@@ -183,30 +212,36 @@
         pseudo += "export PSEUDO_LOCALSTATEDIR=%s;" % p_localstatedir
         pseudo += "export PSEUDO_PASSWD=%s;" % p_passwd
         pseudo += "export PSEUDO_NOSYMLINKEXP=%s;" % p_nosymlinkexp
-        pseudo += "%s/usr/bin/pseudo " % native_sysroot
+        pseudo += "%s " % get_bitbake_var("FAKEROOTCMD")
 
         rootfs = "%s/rootfs_%s.%s.%s" % (cr_workdir, self.label,
                                          self.lineno, self.fstype)
         if os.path.isfile(rootfs):
             os.remove(rootfs)
 
-        if not self.fstype:
-            msger.error("File system for partition %s not specified in kickstart, " \
-                        "use --fstype option" % (self.mountpoint))
+        # Get rootfs size from bitbake variable if it's not set in .ks file
+        if not self.size:
+            # Bitbake variable ROOTFS_SIZE is calculated in
+            # Image._get_rootfs_size method from meta/lib/oe/image.py
+            # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+            # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+            rsize_bb = get_bitbake_var('ROOTFS_SIZE')
+            if rsize_bb:
+                logger.warning('overhead-factor was specified, but size was not,'
+                               ' so bitbake variables will be used for the size.'
+                               ' In this case both IMAGE_OVERHEAD_FACTOR and '
+                               '--overhead-factor will be applied')
+                self.size = int(round(float(rsize_bb)))
 
-        for prefix in ("ext", "btrfs", "vfat", "squashfs"):
-            if self.fstype.startswith(prefix):
-                method = getattr(self, "prepare_rootfs_" + prefix)
-                method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+        prefix = "ext" if self.fstype.startswith("ext") else self.fstype
+        method = getattr(self, "prepare_rootfs_" + prefix)
+        method(rootfs, oe_builddir, rootfs_dir, native_sysroot, pseudo)
+        self.source_file = rootfs
 
-                self.source_file = rootfs
-
-                # get the rootfs size in the right units for kickstart (kB)
-                du_cmd = "du -Lbks %s" % rootfs
-                out = exec_cmd(du_cmd)
-                self.size = int(out.split()[0])
-
-                break
+        # get the rootfs size in the right units for kickstart (kB)
+        du_cmd = "du -Lbks %s" % rootfs
+        out = exec_cmd(du_cmd)
+        self.size = int(out.split()[0])
 
     def prepare_rootfs_ext(self, rootfs, oe_builddir, rootfs_dir,
                            native_sysroot, pseudo):
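
[Note: the hunk above drops the prefix loop in favour of direct name-based dispatch: every ext2/3/4 variant shares one handler, and any other fstype selects its prepare_rootfs_<fstype> method via getattr(). A minimal, runnable sketch of the pattern; the class and method bodies are illustrative, not the real signatures.]

# Sketch of the getattr() dispatch used in prepare_rootfs() above.
class Part:
    def __init__(self, fstype):
        self.fstype = fstype

    def prepare(self):
        # all ext* filesystems share the "ext" handler
        prefix = "ext" if self.fstype.startswith("ext") else self.fstype
        # an unsupported fstype surfaces as AttributeError here
        method = getattr(self, "prepare_rootfs_" + prefix)
        method()

    def prepare_rootfs_ext(self):
        print("formatting as", self.fstype)

    def prepare_rootfs_btrfs(self):
        print("formatting as btrfs")

Part("ext4").prepare()   # formatting as ext4
Part("btrfs").prepare()  # formatting as btrfs
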
@@ -217,17 +252,10 @@
         out = exec_cmd(du_cmd)
         actual_rootfs_size = int(out.split()[0])
 
-        extra_blocks = self.get_extra_block_count(actual_rootfs_size)
-        if extra_blocks < self.extra_space:
-            extra_blocks = self.extra_space
+        rootfs_size = self.get_rootfs_size(actual_rootfs_size)
 
-        rootfs_size = actual_rootfs_size + extra_blocks
-        rootfs_size *= self.overhead_factor
-
-        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
-                    (extra_blocks, self.mountpoint, rootfs_size))
-
-        exec_cmd("truncate %s -s %d" % (rootfs, rootfs_size * 1024))
+        with open(rootfs, 'w') as sparse:
+            os.ftruncate(sparse.fileno(), rootfs_size * 1024)
 
         extra_imagecmd = "-i 8192"
 
@@ -239,7 +267,7 @@
             (self.fstype, extra_imagecmd, rootfs, label_str, rootfs_dir)
         exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
 
-        mkfs_cmd = "fsck.%s -pvfD %s || [ $? -le 3 ]" % (self.fstype, rootfs)
+        mkfs_cmd = "fsck.%s -pvfD %s" % (self.fstype, rootfs)
         exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
 
     def prepare_rootfs_btrfs(self, rootfs, oe_builddir, rootfs_dir,
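
[Note: the deleted padding arithmetic (extra blocks, --extra-space floor, --overhead-factor multiplier) now lives in a get_rootfs_size() helper that this patch does not show. A hypothetical reconstruction from the removed lines; the fixed_size short-circuit is an assumption based on the new fixed_size check earlier in the file.]

# Hypothetical sketch of get_rootfs_size(); extra_space stands in for the
# old max(get_extra_block_count(...), extra_space) computation.
def get_rootfs_size(actual_rootfs_size, extra_space, overhead_factor,
                    fixed_size=None):
    if fixed_size:
        return fixed_size  # assumed: --fixed-size bypasses all padding
    # pad the measured size, then apply the multiplicative overhead factor
    return int((actual_rootfs_size + extra_space) * overhead_factor)

print(get_rootfs_size(10000, 10240, 1.3))  # 26312 (kB)
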
@@ -253,17 +281,10 @@
         out = exec_cmd(du_cmd)
         actual_rootfs_size = int(out.split()[0])
 
-        extra_blocks = self.get_extra_block_count(actual_rootfs_size)
-        if extra_blocks < self.extra_space:
-            extra_blocks = self.extra_space
+        rootfs_size = self.get_rootfs_size(actual_rootfs_size)
 
-        rootfs_size = actual_rootfs_size + extra_blocks
-        rootfs_size *= self.overhead_factor
-
-        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
-                    (extra_blocks, self.mountpoint, rootfs_size))
-
-        exec_cmd("truncate %s -s %d" % (rootfs, rootfs_size * 1024))
+        with open(rootfs, 'w') as sparse:
+            os.ftruncate(sparse.fileno(), rootfs_size * 1024)
 
         label_str = ""
         if self.label:
@@ -273,29 +294,27 @@
             (self.fstype, rootfs_size * 1024, rootfs_dir, label_str, rootfs)
         exec_native_cmd(mkfs_cmd, native_sysroot, pseudo=pseudo)
 
-    def prepare_rootfs_vfat(self, rootfs, oe_builddir, rootfs_dir,
-                            native_sysroot, pseudo):
+    def prepare_rootfs_msdos(self, rootfs, oe_builddir, rootfs_dir,
+                             native_sysroot, pseudo):
         """
-        Prepare content for a vfat rootfs partition.
+        Prepare content for a msdos/vfat rootfs partition.
         """
         du_cmd = "du -bks %s" % rootfs_dir
         out = exec_cmd(du_cmd)
         blocks = int(out.split()[0])
 
-        extra_blocks = self.get_extra_block_count(blocks)
-        if extra_blocks < self.extra_space:
-            extra_blocks = self.extra_space
-
-        blocks += extra_blocks
-
-        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
-                    (extra_blocks, self.mountpoint, blocks))
+        rootfs_size = self.get_rootfs_size(blocks)
 
         label_str = "-n boot"
         if self.label:
             label_str = "-n %s" % self.label
 
-        dosfs_cmd = "mkdosfs %s -S 512 -C %s %d" % (label_str, rootfs, blocks)
+        size_str = ""
+        if self.fstype == 'msdos':
+            size_str = "-F 16" # FAT 16
+
+        dosfs_cmd = "mkdosfs %s -S 512 %s -C %s %d" % (label_str, size_str,
+                                                       rootfs, rootfs_size)
         exec_native_cmd(dosfs_cmd, native_sysroot)
 
         mcopy_cmd = "mcopy -i %s -s %s/* ::/" % (rootfs, rootfs_dir)
@@ -304,6 +323,8 @@
         chmod_cmd = "chmod 644 %s" % rootfs
         exec_cmd(chmod_cmd)
 
+    prepare_rootfs_vfat = prepare_rootfs_msdos
+
     def prepare_rootfs_squashfs(self, rootfs, oe_builddir, rootfs_dir,
                                 native_sysroot, pseudo):
         """
@@ -318,7 +339,9 @@
         """
         Prepare an empty ext2/3/4 partition.
         """
-        exec_cmd("truncate %s -s %d" % (rootfs, self.size * 1024))
+        size = self.disk_size
+        with open(rootfs, 'w') as sparse:
+            os.ftruncate(sparse.fileno(), size * 1024)
 
         extra_imagecmd = "-i 8192"
 
@@ -335,7 +358,9 @@
         """
         Prepare an empty btrfs partition.
         """
-        exec_cmd("truncate %s -s %d" % (rootfs, self.size * 1024))
+        size = self.disk_size
+        with open(rootfs, 'w') as sparse:
+            os.ftruncate(sparse.fileno(), size * 1024)
 
         label_str = ""
         if self.label:
@@ -345,50 +370,29 @@
             (self.fstype, self.size * 1024, label_str, rootfs)
         exec_native_cmd(mkfs_cmd, native_sysroot)
 
-    def prepare_empty_partition_vfat(self, rootfs, oe_builddir,
-                                     native_sysroot):
+    def prepare_empty_partition_msdos(self, rootfs, oe_builddir,
+                                      native_sysroot):
         """
         Prepare an empty msdos/vfat partition.
         """
-        blocks = self.size
+        blocks = self.disk_size
 
         label_str = "-n boot"
         if self.label:
             label_str = "-n %s" % self.label
 
-        dosfs_cmd = "mkdosfs %s -S 512 -C %s %d" % (label_str, rootfs, blocks)
+        size_str = ""
+        if self.fstype == 'msdos':
+            size_str = "-F 16" # FAT 16
+
+        dosfs_cmd = "mkdosfs %s -S 512 %s -C %s %d" % (label_str, size_str,
+                                                       rootfs, blocks)
         exec_native_cmd(dosfs_cmd, native_sysroot)
 
         chmod_cmd = "chmod 644 %s" % rootfs
         exec_cmd(chmod_cmd)
 
-    def prepare_empty_partition_squashfs(self, cr_workdir, oe_builddir,
-                                         native_sysroot):
-        """
-        Prepare an empty squashfs partition.
-        """
-        msger.warning("Creating of an empty squashfs %s partition was attempted. " \
-                      "Proceeding as requested." % self.mountpoint)
-
-        path = "%s/fs_%s.%s" % (cr_workdir, self.label, self.fstype)
-        os.path.isfile(path) and os.remove(path)
-
-        # it is not possible to create a squashfs without source data,
-        # thus prepare an empty temp dir that is used as source
-        tmpdir = tempfile.mkdtemp()
-
-        squashfs_cmd = "mksquashfs %s %s -noappend" % \
-                       (tmpdir, path)
-        exec_native_cmd(squashfs_cmd, native_sysroot)
-
-        os.rmdir(tmpdir)
-
-        # get the rootfs size in the right units for kickstart (kB)
-        du_cmd = "du -Lbks %s" % path
-        out = exec_cmd(du_cmd)
-        fs_size = out.split()[0]
-
-        self.size = int(fs_size)
+    prepare_empty_partition_vfat = prepare_empty_partition_msdos
 
     def prepare_swap_partition(self, cr_workdir, oe_builddir, native_sysroot):
         """
@@ -396,7 +400,8 @@
         """
         path = "%s/fs.%s" % (cr_workdir, self.fstype)
 
-        exec_cmd("truncate %s -s %d" % (path, self.size * 1024))
+        with open(path, 'w') as sparse:
+            os.ftruncate(sparse.fileno(), self.size * 1024)
 
         import uuid
         label_str = ""
@@ -404,4 +409,3 @@
             label_str = "-L %s" % self.label
         mkswap_cmd = "mkswap %s -U %s %s" % (label_str, str(uuid.uuid1()), path)
         exec_native_cmd(mkswap_cmd, native_sysroot)
-
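
[Note: a pattern repeated throughout this file: every exec_cmd("truncate ...") call became an open() plus os.ftruncate(), which allocates the image as a sparse file without spawning an external process. The pattern in isolation, with an illustrative path:]

import os

def make_sparse(path, size_kb):
    """Create path as a sparse file with an apparent size of size_kb kB."""
    with open(path, 'w') as sparse:          # 'w' truncates any old content
        os.ftruncate(sparse.fileno(), size_kb * 1024)

make_sparse('/tmp/rootfs.img', 4096)
print(os.stat('/tmp/rootfs.img').st_size)    # 4194304 (apparent size)
# the file consumes almost no disk blocks until data is written into it
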
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugin.py b/import-layers/yocto-poky/scripts/lib/wic/plugin.py
deleted file mode 100644
index 306b324..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/plugin.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os, sys
-
-from wic import msger
-from wic import pluginbase
-from wic.utils import errors
-from wic.utils.oe.misc import get_bitbake_var
-
-__ALL__ = ['PluginMgr', 'pluginmgr']
-
-PLUGIN_TYPES = ["imager", "source"]
-
-PLUGIN_DIR = "/lib/wic/plugins" # relative to scripts
-SCRIPTS_PLUGIN_DIR = "scripts" + PLUGIN_DIR
-
-class PluginMgr():
-    plugin_dirs = {}
-
-    # make the manager class as singleton
-    _instance = None
-    def __new__(cls, *args, **kwargs):
-        if not cls._instance:
-            cls._instance = super(PluginMgr, cls).__new__(cls, *args, **kwargs)
-
-        return cls._instance
-
-    def __init__(self):
-        wic_path = os.path.dirname(__file__)
-        eos = wic_path.rfind('scripts') + len('scripts')
-        scripts_path = wic_path[:eos]
-        self.scripts_path = scripts_path
-        self.plugin_dir = scripts_path + PLUGIN_DIR
-        self.layers_path = None
-
-    def _build_plugin_dir_list(self, plugin_dir, ptype):
-        if self.layers_path is None:
-            self.layers_path = get_bitbake_var("BBLAYERS")
-        layer_dirs = []
-
-        if self.layers_path is not None:
-            for layer_path in self.layers_path.split():
-                path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR, ptype)
-                layer_dirs.append(path)
-
-        path = os.path.join(plugin_dir, ptype)
-        layer_dirs.append(path)
-
-        return layer_dirs
-
-    def append_dirs(self, dirs):
-        for path in dirs:
-            self._add_plugindir(path)
-
-        # load all the plugins AGAIN
-        self._load_all()
-
-    def _add_plugindir(self, path):
-        path = os.path.abspath(os.path.expanduser(path))
-
-        if not os.path.isdir(path):
-            return
-
-        if path not in self.plugin_dirs:
-            self.plugin_dirs[path] = False
-            # the value True/False means "loaded"
-
-    def _load_all(self):
-        for (pdir, loaded) in self.plugin_dirs.items():
-            if loaded:
-                continue
-
-            sys.path.insert(0, pdir)
-            for mod in [x[:-3] for x in os.listdir(pdir) if x.endswith(".py")]:
-                if mod and mod != '__init__':
-                    if mod in sys.modules:
-                        #self.plugin_dirs[pdir] = True
-                        msger.warning("Module %s already exists, skip" % mod)
-                    else:
-                        try:
-                            pymod = __import__(mod)
-                            self.plugin_dirs[pdir] = True
-                            msger.debug("Plugin module %s:%s imported"\
-                                        % (mod, pymod.__file__))
-                        except ImportError as err:
-                            msg = 'Failed to load plugin %s/%s: %s' \
-                                % (os.path.basename(pdir), mod, err)
-                            msger.warning(msg)
-
-            del sys.path[0]
-
-    def get_plugins(self, ptype):
-        """ the return value is dict of name:class pairs """
-
-        if ptype not in PLUGIN_TYPES:
-            raise errors.CreatorError('%s is not valid plugin type' % ptype)
-
-        plugins_dir = self._build_plugin_dir_list(self.plugin_dir, ptype)
-
-        self.append_dirs(plugins_dir)
-
-        return pluginbase.get_plugins(ptype)
-
-    def get_source_plugins(self):
-        """
-        Return list of available source plugins.
-        """
-        plugins_dir = self._build_plugin_dir_list(self.plugin_dir, 'source')
-
-        self.append_dirs(plugins_dir)
-
-        return self.get_plugins('source')
-
-
-    def get_source_plugin_methods(self, source_name, methods):
-        """
-        The methods param is a dict with the method names to find.  On
-        return, the dict values will be filled in with pointers to the
-        corresponding methods.  If one or more methods are not found,
-        None is returned.
-        """
-        return_methods = None
-        for _source_name, klass in self.get_plugins('source').items():
-            if _source_name == source_name:
-                for _method_name in methods:
-                    if not hasattr(klass, _method_name):
-                        msger.warning("Unimplemented %s source interface for: %s"\
-                                      % (_method_name, _source_name))
-                        return None
-                    func = getattr(klass, _method_name)
-                    methods[_method_name] = func
-                    return_methods = methods
-        return return_methods
-
-pluginmgr = PluginMgr()
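
[Note: the deleted module kept one shared PluginMgr by overriding __new__, plus mutable directory state; the replacement in pluginbase.py below drops the instance entirely in favour of classmethods and module-level state. The removed singleton idiom, reduced to its core:]

# The __new__-based singleton pattern the deleted PluginMgr relied on.
class Singleton:
    _instance = None

    def __new__(cls):
        if not cls._instance:
            cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance

assert Singleton() is Singleton()  # every call returns the same object
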
diff --git a/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py b/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py
index e737dee..fb3d179 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/pluginbase.py
@@ -15,26 +15,74 @@
 # with this program; if not, write to the Free Software Foundation, Inc., 59
 # Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 
-__all__ = ['ImagerPlugin', 'SourcePlugin', 'get_plugins']
+__all__ = ['ImagerPlugin', 'SourcePlugin']
 
-import sys
+import os
+import logging
+
 from collections import defaultdict
+from importlib.machinery import SourceFileLoader
 
-from wic import msger
+from wic import WicError
+from wic.utils.misc import get_bitbake_var
+
+PLUGIN_TYPES = ["imager", "source"]
+
+SCRIPTS_PLUGIN_DIR = "scripts/lib/wic/plugins"
+
+logger = logging.getLogger('wic')
+
+PLUGINS = defaultdict(dict)
+
+class PluginMgr:
+    _plugin_dirs = []
+
+    @classmethod
+    def get_plugins(cls, ptype):
+        """Get dictionary of <plugin_name>:<class> pairs."""
+        if ptype not in PLUGIN_TYPES:
+            raise WicError('%s is not a valid plugin type' % ptype)
+
+        # collect plugin directories
+        if not cls._plugin_dirs:
+            cls._plugin_dirs = [os.path.join(os.path.dirname(__file__), 'plugins')]
+            layers = get_bitbake_var("BBLAYERS") or ''
+            for layer_path in layers.split():
+                path = os.path.join(layer_path, SCRIPTS_PLUGIN_DIR)
+                path = os.path.abspath(os.path.expanduser(path))
+                if path not in cls._plugin_dirs and os.path.isdir(path):
+                    cls._plugin_dirs.insert(0, path)
+
+        if ptype not in PLUGINS:
+            # load all ptype plugins
+            for pdir in cls._plugin_dirs:
+                ppath = os.path.join(pdir, ptype)
+                if os.path.isdir(ppath):
+                    for fname in os.listdir(ppath):
+                        if fname.endswith('.py'):
+                            mname = fname[:-3]
+                            mpath = os.path.join(ppath, fname)
+                            logger.debug("loading plugin module %s", mpath)
+                            SourceFileLoader(mname, mpath).load_module()
+
+        return PLUGINS.get(ptype)
 
 class PluginMeta(type):
-    plugins = defaultdict(dict)
     def __new__(cls, name, bases, attrs):
         class_type = type.__new__(cls, name, bases, attrs)
         if 'name' in attrs:
-            cls.plugins[class_type.wic_plugin_type][attrs['name']] = class_type
+            PLUGINS[class_type.wic_plugin_type][attrs['name']] = class_type
 
         return class_type
 
-class ImagerPlugin(PluginMeta("Plugin", (), {})):
+class ImagerPlugin(metaclass=PluginMeta):
     wic_plugin_type = "imager"
 
-class SourcePlugin(PluginMeta("Plugin", (), {})):
+    def do_create(self):
+        raise WicError("Method %s.do_create is not implemented" %
+                       self.__class__.__name__)
+
+class SourcePlugin(metaclass=PluginMeta):
     wic_plugin_type = "source"
     """
     The methods that can be implemented by --source plugins.
@@ -50,7 +98,7 @@
         disk image.  This provides a hook to allow finalization of a
         disk image e.g. to write an MBR to it.
         """
-        msger.debug("SourcePlugin: do_install_disk: disk: %s" % disk_name)
+        logger.debug("SourcePlugin: do_install_disk: disk: %s", disk_name)
 
     @classmethod
     def do_stage_partition(cls, part, source_params, creator, cr_workdir,
@@ -67,7 +115,7 @@
         Note that get_bitbake_var() allows you to access non-standard
         variables that you might want to use for this.
         """
-        msger.debug("SourcePlugin: do_stage_partition: part: %s" % part)
+        logger.debug("SourcePlugin: do_stage_partition: part: %s", part)
 
     @classmethod
     def do_configure_partition(cls, part, source_params, creator, cr_workdir,
@@ -78,7 +126,7 @@
         custom configuration files for a partition, for example
         syslinux or grub config files.
         """
-        msger.debug("SourcePlugin: do_configure_partition: part: %s" % part)
+        logger.debug("SourcePlugin: do_configure_partition: part: %s", part)
 
     @classmethod
     def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
@@ -88,7 +136,5 @@
         Called to do the actual content population for a partition i.e. it
         'prepares' the partition to be incorporated into the image.
         """
-        msger.debug("SourcePlugin: do_prepare_partition: part: %s" % part)
+        logger.debug("SourcePlugin: do_prepare_partition: part: %s", part)
 
-def get_plugins(typen):
-    return PluginMeta.plugins.get(typen)
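
[Note: the registry moved from an attribute on PluginMeta to the module-level PLUGINS dict, but the mechanism is unchanged: merely defining a subclass that carries a name attribute registers it, as a side effect of class creation. A self-contained sketch; BootimgDemo is a hypothetical plugin.]

from collections import defaultdict

PLUGINS = defaultdict(dict)

class PluginMeta(type):
    def __new__(cls, name, bases, attrs):
        class_type = type.__new__(cls, name, bases, attrs)
        if 'name' in attrs:  # only concrete plugins declare a name
            PLUGINS[class_type.wic_plugin_type][attrs['name']] = class_type
        return class_type

class SourcePlugin(metaclass=PluginMeta):
    wic_plugin_type = "source"

class BootimgDemo(SourcePlugin):
    name = "bootimg-demo"

print(PLUGINS["source"])  # {'bootimg-demo': <class '...BootimgDemo'>}
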
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct.py
new file mode 100644
index 0000000..f2e6127
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct.py
@@ -0,0 +1,561 @@
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This implements the 'direct' imager plugin class for 'wic'
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+import logging
+import os
+import shutil
+import tempfile
+import uuid
+
+from time import strftime
+
+from wic import WicError
+from wic.filemap import sparse_copy
+from wic.ksparser import KickStart, KickStartError
+from wic.pluginbase import PluginMgr, ImagerPlugin
+from wic.utils.misc import get_bitbake_var, exec_cmd, exec_native_cmd
+
+logger = logging.getLogger('wic')
+
+class DirectPlugin(ImagerPlugin):
+    """
+    Install a system into a file containing a partitioned disk image.
+
+    An image file is formatted with a partition table, each partition
+    created from a rootfs or other OpenEmbedded build artifact and dd'ed
+    into the virtual disk. The disk image can subsequently be dd'ed onto
+    media and used on actual hardware.
+    """
+    name = 'direct'
+
+    def __init__(self, wks_file, rootfs_dir, bootimg_dir, kernel_dir,
+                 native_sysroot, oe_builddir, options):
+        try:
+            self.ks = KickStart(wks_file)
+        except KickStartError as err:
+            raise WicError(str(err))
+
+        # parse possible 'rootfs=name' items
+        self.rootfs_dir = dict(rdir.split('=') for rdir in rootfs_dir.split(' '))
+        self.bootimg_dir = bootimg_dir
+        self.kernel_dir = kernel_dir
+        self.native_sysroot = native_sysroot
+        self.oe_builddir = oe_builddir
+
+        self.outdir = options.outdir
+        self.compressor = options.compressor
+        self.bmap = options.bmap
+
+        self.name = "%s-%s" % (os.path.splitext(os.path.basename(wks_file))[0],
+                               strftime("%Y%m%d%H%M"))
+        self.workdir = tempfile.mkdtemp(dir=self.outdir, prefix='tmp.wic.')
+        self._image = None
+        self.ptable_format = self.ks.bootloader.ptable
+        self.parts = self.ks.partitions
+
+        # as a convenience, set source to the boot partition source
+        # instead of forcing it to be set via bootloader --source
+        for part in self.parts:
+            if not self.ks.bootloader.source and part.mountpoint == "/boot":
+                self.ks.bootloader.source = part.source
+                break
+
+        image_path = self._full_path(self.workdir, self.parts[0].disk, "direct")
+        self._image = PartitionedImage(image_path, self.ptable_format,
+                                       self.parts, self.native_sysroot)
+
+    def do_create(self):
+        """
+        Plugin entry point.
+        """
+        try:
+            self.create()
+            self.assemble()
+            self.finalize()
+            self.print_info()
+        finally:
+            self.cleanup()
+
+    def _write_fstab(self, image_rootfs):
+        """overriden to generate fstab (temporarily) in rootfs. This is called
+        from _create, make sure it doesn't get called from
+        BaseImage.create()
+        """
+        if not image_rootfs:
+            return
+
+        fstab_path = image_rootfs + "/etc/fstab"
+        if not os.path.isfile(fstab_path):
+            return
+
+        with open(fstab_path) as fstab:
+            fstab_lines = fstab.readlines()
+
+        if self._update_fstab(fstab_lines, self.parts):
+            shutil.copyfile(fstab_path, fstab_path + ".orig")
+
+            with open(fstab_path, "w") as fstab:
+                fstab.writelines(fstab_lines)
+
+            return fstab_path
+
+    def _update_fstab(self, fstab_lines, parts):
+        """Assume partition order same as in wks"""
+        updated = False
+        for part in parts:
+            if not part.realnum or not part.mountpoint \
+               or part.mountpoint in ("/", "/boot"):
+                continue
+
+            # mmc device partitions are named mmcblk0p1, mmcblk0p2..
+            prefix = 'p' if part.disk.startswith('mmcblk') else ''
+            device_name = "/dev/%s%s%d" % (part.disk, prefix, part.realnum)
+
+            opts = part.fsopts if part.fsopts else "defaults"
+            line = "\t".join([device_name, part.mountpoint, part.fstype,
+                              opts, "0", "0"]) + "\n"
+
+            fstab_lines.append(line)
+            updated = True
+
+        return updated
+
+    def _full_path(self, path, name, extension):
+        """ Construct full file path to a file we generate. """
+        return os.path.join(path, "%s-%s.%s" % (self.name, name, extension))
+
+    #
+    # Actual implementation
+    #
+    def create(self):
+        """
+        For 'wic', we already have our build artifacts - we just create
+        filesystems from the artifacts directly and combine them into
+        a partitioned image.
+        """
+        fstab_path = self._write_fstab(self.rootfs_dir.get("ROOTFS_DIR"))
+
+        for part in self.parts:
+            # get rootfs size from bitbake variable if it's not set in .ks file
+            if not part.size:
+                # and if rootfs name is specified for the partition
+                image_name = self.rootfs_dir.get(part.rootfs_dir)
+                if image_name and os.path.sep not in image_name:
+                    # Bitbake variable ROOTFS_SIZE is calculated in
+                    # Image._get_rootfs_size method from meta/lib/oe/image.py
+                    # using IMAGE_ROOTFS_SIZE, IMAGE_ROOTFS_ALIGNMENT,
+                    # IMAGE_OVERHEAD_FACTOR and IMAGE_ROOTFS_EXTRA_SPACE
+                    rsize_bb = get_bitbake_var('ROOTFS_SIZE', image_name)
+                    if rsize_bb:
+                        part.size = int(round(float(rsize_bb)))
+
+        self._image.prepare(self)
+
+        if fstab_path:
+            shutil.move(fstab_path + ".orig", fstab_path)
+
+        self._image.layout_partitions()
+        self._image.create()
+
+    def assemble(self):
+        """
+        Assemble partitions into disk image
+        """
+        self._image.assemble()
+
+    def finalize(self):
+        """
+        Finalize the disk image.
+
+        For example, prepare the image to be bootable by e.g.
+        creating and installing a bootloader configuration.
+        """
+        source_plugin = self.ks.bootloader.source
+        disk_name = self.parts[0].disk
+        if source_plugin:
+            plugin = PluginMgr.get_plugins('source')[source_plugin]
+            plugin.do_install_disk(self._image, disk_name, self, self.workdir,
+                                   self.oe_builddir, self.bootimg_dir,
+                                   self.kernel_dir, self.native_sysroot)
+
+        full_path = self._image.path
+        # Generate .bmap
+        if self.bmap:
+            logger.debug("Generating bmap file for %s", disk_name)
+            exec_native_cmd("bmaptool create %s -o %s.bmap" % (full_path, full_path),
+                            self.native_sysroot)
+        # Compress the image
+        if self.compressor:
+            logger.debug("Compressing disk %s with %s", disk_name, self.compressor)
+            exec_cmd("%s %s" % (self.compressor, full_path))
+
+    def print_info(self):
+        """
+        Print the image(s) and artifacts used, for the user.
+        """
+        msg = "The new image(s) can be found here:\n"
+
+        extension = "direct" + {"gzip": ".gz",
+                                "bzip2": ".bz2",
+                                "xz": ".xz",
+                                None: ""}.get(self.compressor)
+        full_path = self._full_path(self.outdir, self.parts[0].disk, extension)
+        msg += '  %s\n\n' % full_path
+
+        msg += 'The following build artifacts were used to create the image(s):\n'
+        for part in self.parts:
+            if part.rootfs_dir is None:
+                continue
+            if part.mountpoint == '/':
+                suffix = ':'
+            else:
+                suffix = '["%s"]:' % (part.mountpoint or part.label)
+            msg += '  ROOTFS_DIR%s%s\n' % (suffix.ljust(20), part.rootfs_dir)
+
+        msg += '  BOOTIMG_DIR:                  %s\n' % self.bootimg_dir
+        msg += '  KERNEL_DIR:                   %s\n' % self.kernel_dir
+        msg += '  NATIVE_SYSROOT:               %s\n' % self.native_sysroot
+
+        logger.info(msg)
+
+    @property
+    def rootdev(self):
+        """
+        Get root device name to use as a 'root' parameter
+        in kernel command line.
+
+        Assume partition order same as in wks
+        """
+        for part in self.parts:
+            if part.mountpoint == "/":
+                if part.uuid:
+                    return "PARTUUID=%s" % part.uuid
+                else:
+                    suffix = 'p' if part.disk.startswith('mmcblk') else ''
+                    return "/dev/%s%s%-d" % (part.disk, suffix, part.realnum)
+
+    def cleanup(self):
+        if self._image:
+            self._image.cleanup()
+
+        # Move results to the output dir
+        if not os.path.exists(self.outdir):
+            os.makedirs(self.outdir)
+
+        for fname in os.listdir(self.workdir):
+            path = os.path.join(self.workdir, fname)
+            if os.path.isfile(path):
+                shutil.move(path, os.path.join(self.outdir, fname))
+
+        # remove work directory
+        shutil.rmtree(self.workdir, ignore_errors=True)
+
+# Overhead of the MBR partitioning scheme (just one sector)
+MBR_OVERHEAD = 1
+
+# Overhead of the GPT partitioning scheme
+GPT_OVERHEAD = 34
+
+# Size of a sector in bytes
+SECTOR_SIZE = 512
+
+class PartitionedImage():
+    """
+    Partitioned image in a file.
+    """
+
+    def __init__(self, path, ptable_format, partitions, native_sysroot=None):
+        self.path = path  # Path to the image file
+        self.numpart = 0  # Number of allocated partitions
+        self.realpart = 0 # Number of partitions in the partition table
+        self.offset = 0   # Offset of next partition (in sectors)
+        self.min_size = 0 # Minimum required disk size to fit
+                          # all partitions (in bytes)
+        self.ptable_format = ptable_format  # Partition table format
+        # Disk system identifier
+        self.identifier = int.from_bytes(os.urandom(4), 'little')
+
+        self.partitions = partitions
+        self.partimages = []
+        # Size of a sector used in calculations
+        self.sector_size = SECTOR_SIZE
+        self.native_sysroot = native_sysroot
+
+        # calculate the real partition number, accounting for partitions not
+        # in the partition table and logical partitions
+        realnum = 0
+        for part in self.partitions:
+            if part.no_table:
+                part.realnum = 0
+            else:
+                realnum += 1
+                if self.ptable_format == 'msdos' and realnum > 3:
+                    part.realnum = realnum + 1
+                    continue
+                part.realnum = realnum
+
+        # generate partition UUIDs
+        for part in self.partitions:
+            if not part.uuid and part.use_uuid:
+                if self.ptable_format == 'gpt':
+                    part.uuid = str(uuid.uuid4())
+                else: # msdos partition table
+                    part.uuid = '%08x-%02d' % (self.identifier, part.realnum)
+
+    def prepare(self, imager):
+        """Prepare an image. Call prepare method of all image partitions."""
+        for part in self.partitions:
+            # need to create the filesystems in order to get their
+            # sizes before we can add them and do the layout.
+            part.prepare(imager, imager.workdir, imager.oe_builddir,
+                         imager.rootfs_dir, imager.bootimg_dir,
+                         imager.kernel_dir, imager.native_sysroot)
+
+            # Converting kB to sectors for parted
+            part.size_sec = part.disk_size * 1024 // self.sector_size
+
+    def layout_partitions(self):
+        """ Layout the partitions, meaning calculate the position of every
+        partition on the disk. The 'ptable_format' attribute defines the
+        partition table format and may be "msdos" or "gpt". """
+
+        logger.debug("Assigning %s partitions to disks", self.ptable_format)
+
+        # The number of primary and logical partitions. Extended partition and
+        # partitions not listed in the table are not included.
+        num_real_partitions = len([p for p in self.partitions if not p.no_table])
+
+        # Go through partitions in the order they are added in .ks file
+        for num in range(len(self.partitions)):
+            part = self.partitions[num]
+
+            if self.ptable_format == 'msdos' and part.part_type:
+                # The --part-type can also be implemented for MBR partitions,
+                # in which case it would map to the 1-byte "partition type"
+                # field at offset 4 of the partition entry.
+                raise WicError("setting custom partition type is not " \
+                               "implemented for msdos partitions")
+
+            # Get the disk where the partition is located
+            self.numpart += 1
+            if not part.no_table:
+                self.realpart += 1
+
+            if self.numpart == 1:
+                if self.ptable_format == "msdos":
+                    overhead = MBR_OVERHEAD
+                elif self.ptable_format == "gpt":
+                    overhead = GPT_OVERHEAD
+
+                # Skip the sectors reserved for the partitioning scheme overhead
+                self.offset += overhead
+
+            if self.realpart > 3 and num_real_partitions > 4:
+                # Reserve a sector for EBR for every logical partition
+                # before alignment is performed.
+                if self.ptable_format == "msdos":
+                    self.offset += 1
+
+            if part.align:
+                # If not the first partition and alignment is set, we need
+                # to align the partition.
+                # FIXME: This leaves empty space on the disk. To fill the
+                # gaps we could enlarge the previous partition?
+
+                # Calc how much the alignment is off.
+                align_sectors = self.offset % (part.align * 1024 // self.sector_size)
+
+                if align_sectors:
+                    # If partition is not aligned as required, we need
+                    # to move forward to the next alignment point
+                    align_sectors = (part.align * 1024 // self.sector_size) - align_sectors
+
+                    logger.debug("Realignment for %s%s with %s sectors, original"
+                                 " offset %s, target alignment is %sK.",
+                                 part.disk, self.numpart, align_sectors,
+                                 self.offset, part.align)
+
+                    # increase the offset so the partition actually starts on the right alignment
+                    self.offset += align_sectors
+
+            part.start = self.offset
+            self.offset += part.size_sec
+
+            part.type = 'primary'
+            if not part.no_table:
+                part.num = self.realpart
+            else:
+                part.num = 0
+
+            if self.ptable_format == "msdos":
+                # only count the partitions that are in partition table
+                if num_real_partitions > 4:
+                    if self.realpart > 3:
+                        part.type = 'logical'
+                        part.num = self.realpart + 1
+
+            logger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
+                         "sectors (%d bytes).", part.mountpoint, part.disk,
+                         part.num, part.start, self.offset - 1, part.size_sec,
+                         part.size_sec * self.sector_size)
+
+        # Once all the partitions have been laid out, we can calculate the
+        # minimum disk size
+        self.min_size = self.offset
+        if self.ptable_format == "gpt":
+            self.min_size += GPT_OVERHEAD
+
+        self.min_size *= self.sector_size
+
+    def _create_partition(self, device, parttype, fstype, start, size):
+        """ Create a partition on an image described by the 'device' object. """
+
+        # Start is included in the size, so we need to subtract one from the end.
+        end = start + size - 1
+        logger.debug("Added '%s' partition, sectors %d-%d, size %d sectors",
+                     parttype, start, end, size)
+
+        cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
+        if fstype:
+            cmd += " %s" % fstype
+        cmd += " %d %d" % (start, end)
+
+        return exec_native_cmd(cmd, self.native_sysroot)
+
+    def create(self):
+        logger.debug("Creating sparse file %s", self.path)
+        with open(self.path, 'w') as sparse:
+            os.ftruncate(sparse.fileno(), self.min_size)
+
+        logger.debug("Initializing partition table for %s", self.path)
+        exec_native_cmd("parted -s %s mklabel %s" %
+                        (self.path, self.ptable_format), self.native_sysroot)
+
+        logger.debug("Set disk identifier %x", self.identifier)
+        with open(self.path, 'r+b') as img:
+            img.seek(0x1B8)
+            img.write(self.identifier.to_bytes(4, 'little'))
+
+        logger.debug("Creating partitions")
+
+        for part in self.partitions:
+            if part.num == 0:
+                continue
+
+            if self.ptable_format == "msdos" and part.num == 5:
+                # Create an extended partition (note: extended
+                # partition is described in MBR and contains all
+                # logical partitions). The logical partitions save a
+                # sector for an EBR just before the start of a
+                # partition. The extended partition must start one
+                # sector before the start of the first logical
+                # partition. This way the first EBR is inside of the
+                # extended partition. Since the extended partition
+                # starts a sector before the first logical partition,
+                # add a sector at the back, so that there is enough
+                # room for all logical partitions.
+                self._create_partition(self.path, "extended",
+                                       None, part.start - 1,
+                                       self.offset - part.start + 1)
+
+            if part.fstype == "swap":
+                parted_fs_type = "linux-swap"
+            elif part.fstype == "vfat":
+                parted_fs_type = "fat32"
+            elif part.fstype == "msdos":
+                parted_fs_type = "fat16"
+                if not part.system_id:
+                    part.system_id = '0x6' # FAT16
+            else:
+                # Type for ext2/ext3/ext4/btrfs
+                parted_fs_type = "ext2"
+
+            # Boot ROM of OMAP boards requires the vfat boot partition to have an
+            # even number of sectors.
+            if part.mountpoint == "/boot" and part.fstype in ["vfat", "msdos"] \
+               and part.size_sec % 2:
+                logger.debug("Subtracting one sector from '%s' partition to "
+                             "get even number of sectors for the partition",
+                             part.mountpoint)
+                part.size_sec -= 1
+
+            self._create_partition(self.path, part.type,
+                                   parted_fs_type, part.start, part.size_sec)
+
+            if part.part_type:
+                logger.debug("partition %d: set type UID to %s",
+                             part.num, part.part_type)
+                exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
+                                         (part.num, part.part_type,
+                                          self.path), self.native_sysroot)
+
+            if part.uuid and self.ptable_format == "gpt":
+                logger.debug("partition %d: set UUID to %s",
+                             part.num, part.uuid)
+                exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
+                                (part.num, part.uuid, self.path),
+                                self.native_sysroot)
+
+            if part.label and self.ptable_format == "gpt":
+                logger.debug("partition %d: set name to %s",
+                             part.num, part.label)
+                exec_native_cmd("parted -s %s name %d %s" % \
+                                (self.path, part.num, part.label),
+                                self.native_sysroot)
+
+            if part.active:
+                flag_name = "legacy_boot" if self.ptable_format == 'gpt' else "boot"
+                logger.debug("Set '%s' flag for partition '%s' on disk '%s'",
+                             flag_name, part.num, self.path)
+                exec_native_cmd("parted -s %s set %d %s on" % \
+                                (self.path, part.num, flag_name),
+                                self.native_sysroot)
+            if part.system_id:
+                exec_native_cmd("sfdisk --part-type %s %s %s" % \
+                                (self.path, part.num, part.system_id),
+                                self.native_sysroot)
+
+    def cleanup(self):
+        # remove partition images
+        for image in set(self.partimages):
+            os.remove(image)
+
+    def assemble(self):
+        logger.debug("Installing partitions")
+
+        for part in self.partitions:
+            source = part.source_file
+            if source:
+                # install source_file contents into a partition
+                sparse_copy(source, self.path, part.start * self.sector_size)
+
+                logger.debug("Installed %s in partition %d, sectors %d-%d, "
+                             "size %d sectors", source, part.num, part.start,
+                             part.start + part.size_sec - 1, part.size_sec)
+
+                partimage = self.path + '.p%d' % part.num
+                os.rename(source, partimage)
+                self.partimages.append(partimage)
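
[Note: the alignment step in layout_partitions() above converts --align from kB to sectors and bumps the running offset to the next multiple. The same arithmetic in isolation:]

SECTOR_SIZE = 512

def align_offset(offset, align_kb):
    """Round offset (in sectors) up to the next align_kb boundary."""
    align = align_kb * 1024 // SECTOR_SIZE   # alignment in sectors
    rem = offset % align                     # how far past the boundary
    return offset if rem == 0 else offset + align - rem

# a partition landing on sector 35 with 4 kB (8-sector) alignment moves to 40
print(align_offset(35, 4))  # 40
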
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py
deleted file mode 100644
index 8fe3930..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/imager/direct_plugin.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# DESCRIPTION
-# This implements the 'direct' imager plugin class for 'wic'
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
-
-from wic.utils import errors
-from wic.conf import configmgr
-
-import wic.imager.direct as direct
-from wic.pluginbase import ImagerPlugin
-
-class DirectPlugin(ImagerPlugin):
-    """
-    Install a system into a file containing a partitioned disk image.
-
-    An image file is formatted with a partition table, each partition
-    created from a rootfs or other OpenEmbedded build artifact and dd'ed
-    into the virtual disk. The disk image can subsequently be dd'ed onto
-    media and used on actual hardware.
-    """
-
-    name = 'direct'
-
-    @classmethod
-    def __rootfs_dir_to_dict(cls, rootfs_dirs):
-        """
-        Gets a string that contain 'connection=dir' splitted by
-        space and return a dict
-        """
-        krootfs_dir = {}
-        for rootfs_dir in rootfs_dirs.split(' '):
-            key, val = rootfs_dir.split('=')
-            krootfs_dir[key] = val
-
-        return krootfs_dir
-
-    @classmethod
-    def do_create(cls, opts, *args):
-        """
-        Create direct image, called from creator as 'direct' cmd
-        """
-        if len(args) != 8:
-            raise errors.Usage("Extra arguments given")
-
-        native_sysroot = args[0]
-        kernel_dir = args[1]
-        bootimg_dir = args[2]
-        rootfs_dir = args[3]
-
-        creatoropts = configmgr.create
-        ksconf = args[4]
-
-        image_output_dir = args[5]
-        oe_builddir = args[6]
-        compressor = args[7]
-
-        krootfs_dir = cls.__rootfs_dir_to_dict(rootfs_dir)
-
-        configmgr._ksconf = ksconf
-
-        creator = direct.DirectImageCreator(oe_builddir,
-                                            image_output_dir,
-                                            krootfs_dir,
-                                            bootimg_dir,
-                                            kernel_dir,
-                                            native_sysroot,
-                                            compressor,
-                                            creatoropts,
-                                            opts.bmap)
-
-        try:
-            creator.create()
-            creator.assemble()
-            creator.finalize()
-            creator.print_outimage_info()
-
-        except errors.CreatorError:
-            raise
-        finally:
-            creator.cleanup()
-
-        return 0
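
[Note: the deleted __rootfs_dir_to_dict() helper survives in the new DirectPlugin as a one-line dict comprehension over the 'key=dir' tokens (see the constructor above). Equivalent behaviour, with an illustrative input string:]

rootfs_dirs = "ROOTFS_DIR=/build/rootfs vendor=/build/vendor"  # example only

# split on spaces, then split each 'connection=dir' token on '='
krootfs_dir = dict(rdir.split('=') for rdir in rootfs_dirs.split(' '))

print(krootfs_dir["ROOTFS_DIR"])  # /build/rootfs
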
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py
index 4adb80b..9879cb9 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-efi.py
@@ -24,25 +24,28 @@
 # Tom Zanussi <tom.zanussi (at] linux.intel.com>
 #
 
+import logging
 import os
 import shutil
 
-from wic import msger
+from wic import WicError
+from wic.engine import get_custom_config
 from wic.pluginbase import SourcePlugin
-from wic.utils.misc import get_custom_config
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var, \
-                              BOOTDD_EXTRA_SPACE
+from wic.utils.misc import (exec_cmd, exec_native_cmd, get_bitbake_var,
+                            BOOTDD_EXTRA_SPACE)
+
+logger = logging.getLogger('wic')
 
 class BootimgEFIPlugin(SourcePlugin):
     """
     Create EFI boot partition.
-    This plugin supports GRUB 2 and gummiboot bootloaders.
+    This plugin supports GRUB 2 and systemd-boot bootloaders.
     """
 
     name = 'bootimg-efi'
 
     @classmethod
-    def do_configure_grubefi(cls, hdddir, creator, cr_workdir):
+    def do_configure_grubefi(cls, creator, cr_workdir):
         """
         Create loader-specific (grub-efi) config
         """
@@ -53,11 +56,11 @@
             if custom_cfg:
                 # Use a custom configuration for grub
                 grubefi_conf = custom_cfg
-                msger.debug("Using custom configuration file "
-                        "%s for grub.cfg" % configfile)
+                logger.debug("Using custom configuration file "
+                             "%s for grub.cfg", configfile)
             else:
-                msger.error("configfile is specified but failed to "
-                        "get it from %s." % configfile)
+                raise WicError("configfile is specified but failed to "
+                               "get it from %s." % configfile)
 
         if not custom_cfg:
             # Create grub configuration using parameters from wks file
@@ -75,14 +78,14 @@
                 % (kernel, creator.rootdev, bootloader.append)
             grubefi_conf += "}\n"
 
-        msger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg" \
-                        % cr_workdir)
+        logger.debug("Writing grubefi config %s/hdd/boot/EFI/BOOT/grub.cfg",
+                     cr_workdir)
         cfg = open("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir, "w")
         cfg.write(grubefi_conf)
         cfg.close()
 
     @classmethod
-    def do_configure_gummiboot(cls, hdddir, creator, cr_workdir):
+    def do_configure_systemdboot(cls, hdddir, creator, cr_workdir, source_params):
         """
         Create loader-specific systemd-boot/gummiboot config
         """
@@ -98,8 +101,21 @@
         loader_conf += "default boot\n"
         loader_conf += "timeout %d\n" % bootloader.timeout
 
-        msger.debug("Writing gummiboot config %s/hdd/boot/loader/loader.conf" \
-                        % cr_workdir)
+        initrd = source_params.get('initrd')
+
+        if initrd:
+            # obviously we need to have a common deploy var
+            bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+            if not bootimg_dir:
+                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+
+            cp_cmd = "cp %s/%s %s" % (bootimg_dir, initrd, hdddir)
+            exec_cmd(cp_cmd, True)
+        else:
+            logger.debug("Ignoring missing initrd")
+
+        logger.debug("Writing systemd-boot config "
+                     "%s/hdd/boot/loader/loader.conf", cr_workdir)
         cfg = open("%s/hdd/boot/loader/loader.conf" % cr_workdir, "w")
         cfg.write(loader_conf)
         cfg.close()
@@ -109,16 +125,16 @@
         if configfile:
             custom_cfg = get_custom_config(configfile)
             if custom_cfg:
-                # Use a custom configuration for gummiboot
+                # Use a custom configuration for systemd-boot
                 boot_conf = custom_cfg
-                msger.debug("Using custom configuration file "
-                        "%s for gummiboots's boot.conf" % configfile)
+                logger.debug("Using custom configuration file "
+                             "%s for systemd-boot's boot.conf", configfile)
             else:
-                msger.error("configfile is specified but failed to "
-                        "get it from %s." % configfile)
+                raise WicError("configfile is specified but failed to "
+                               "get it from %s." % configfile)
 
         if not custom_cfg:
-            # Create gummiboot configuration using parameters from wks file
+            # Create systemd-boot configuration using parameters from wks file
             kernel = "/bzImage"
 
             boot_conf = ""
@@ -127,8 +143,11 @@
             boot_conf += "options LABEL=Boot root=%s %s\n" % \
                              (creator.rootdev, bootloader.append)
 
-        msger.debug("Writing gummiboot config %s/hdd/boot/loader/entries/boot.conf" \
-                        % cr_workdir)
+            if initrd:
+                boot_conf += "initrd /%s\n" % initrd
+
+        logger.debug("Writing systemd-boot config "
+                     "%s/hdd/boot/loader/entries/boot.conf", cr_workdir)
         cfg = open("%s/hdd/boot/loader/entries/boot.conf" % cr_workdir, "w")
         cfg.write(boot_conf)
         cfg.close()
@@ -148,14 +167,13 @@
 
         try:
             if source_params['loader'] == 'grub-efi':
-                cls.do_configure_grubefi(hdddir, creator, cr_workdir)
-            elif source_params['loader'] == 'gummiboot' \
-                 or source_params['loader'] == 'systemd-boot':
-                cls.do_configure_gummiboot(hdddir, creator, cr_workdir)
+                cls.do_configure_grubefi(creator, cr_workdir)
+            elif source_params['loader'] == 'systemd-boot':
+                cls.do_configure_systemdboot(hdddir, creator, cr_workdir, source_params)
             else:
-                msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+                raise WicError("unrecognized bootimg-efi loader: %s" % source_params['loader'])
         except KeyError:
-            msger.error("bootimg-efi requires a loader, none specified")
+            raise WicError("bootimg-efi requires a loader, none specified")
 
 
     @classmethod
@@ -167,12 +185,10 @@
         'prepares' the partition to be incorporated into the image.
         In this case, prepare content for an EFI (grub) boot partition.
         """
-        if not bootimg_dir:
-            bootimg_dir = get_bitbake_var("HDDDIR")
-            if not bootimg_dir:
-                msger.error("Couldn't find HDDDIR, exiting\n")
-            # just so the result notes display it
-            creator.set_bootimg_dir(bootimg_dir)
+        if not kernel_dir:
+            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+            if not kernel_dir:
+                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
 
         staging_kernel_dir = kernel_dir
 
@@ -182,24 +198,27 @@
             (staging_kernel_dir, hdddir)
         exec_cmd(install_cmd)
 
+
         try:
             if source_params['loader'] == 'grub-efi':
                 shutil.copyfile("%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir,
                                 "%s/grub.cfg" % cr_workdir)
-                cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
-                exec_cmd(cp_cmd, True)
+                for mod in [x for x in os.listdir(kernel_dir) if x.startswith("grub-efi-")]:
+                    cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[9:])
+                    exec_cmd(cp_cmd, True)
                 shutil.move("%s/grub.cfg" % cr_workdir,
                             "%s/hdd/boot/EFI/BOOT/grub.cfg" % cr_workdir)
-            elif source_params['loader'] == 'gummiboot' \
-                 or source_params['loader'] == 'systemd-boot':
-                cp_cmd = "cp %s/EFI/BOOT/* %s/EFI/BOOT" % (bootimg_dir, hdddir)
-                exec_cmd(cp_cmd, True)
+            elif source_params['loader'] == 'systemd-boot':
+                for mod in [x for x in os.listdir(kernel_dir) if x.startswith("systemd-")]:
+                    cp_cmd = "cp %s/%s %s/EFI/BOOT/%s" % (kernel_dir, mod, hdddir, mod[8:])
+                    exec_cmd(cp_cmd, True)
             else:
-                msger.error("unrecognized bootimg-efi loader: %s" % source_params['loader'])
+                raise WicError("unrecognized bootimg-efi loader: %s" %
+                               source_params['loader'])
         except KeyError:
-            msger.error("bootimg-efi requires a loader, none specified")
+            raise WicError("bootimg-efi requires a loader, none specified")
 
-        startup = os.path.join(bootimg_dir, "startup.nsh")
+        startup = os.path.join(kernel_dir, "startup.nsh")
         if os.path.exists(startup):
             cp_cmd = "cp %s %s/" % (startup, hdddir)
             exec_cmd(cp_cmd, True)
@@ -215,8 +234,8 @@
 
         blocks += extra_blocks
 
-        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
-                    (extra_blocks, part.mountpoint, blocks))
+        logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+                     extra_blocks, part.mountpoint, blocks)
 
         # dosfs image, created by mkdosfs
         bootimg = "%s/boot.img" % cr_workdir
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-partition.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-partition.py
index b76c121..13fddbd 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-partition.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-partition.py
@@ -23,14 +23,18 @@
 # Maciej Borzecki <maciej.borzecki (at] open-rnd.pl>
 #
 
+import logging
 import os
 import re
 
-from wic import msger
-from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import exec_cmd, get_bitbake_var
 from glob import glob
 
+from wic import WicError
+from wic.pluginbase import SourcePlugin
+from wic.utils.misc import exec_cmd, get_bitbake_var
+
+logger = logging.getLogger('wic')
+
 class BootimgPartitionPlugin(SourcePlugin):
     """
     Create an image of boot partition, copying over files
@@ -40,26 +44,6 @@
     name = 'bootimg-partition'
 
     @classmethod
-    def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
-                        bootimg_dir, kernel_dir, native_sysroot):
-        """
-        Called after all partitions have been prepared and assembled into a
-        disk image. Do nothing.
-        """
-        pass
-
-    @classmethod
-    def do_configure_partition(cls, part, source_params, cr, cr_workdir,
-                               oe_builddir, bootimg_dir, kernel_dir,
-                               native_sysroot):
-        """
-        Called before do_prepare_partition(). Possibly prepare
-        configuration files of some sort.
-
-        """
-        pass
-
-    @classmethod
     def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
                              oe_builddir, bootimg_dir, kernel_dir,
                              rootfs_dir, native_sysroot):
@@ -74,19 +58,19 @@
         install_cmd = "install -d %s" % hdddir
         exec_cmd(install_cmd)
 
-        if not bootimg_dir:
-            bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
-            if not bootimg_dir:
-                msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+        if not kernel_dir:
+            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+            if not kernel_dir:
+                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
 
-        msger.debug('Bootimg dir: %s' % bootimg_dir)
+        logger.debug('Kernel dir: %s', kernel_dir)
 
         boot_files = get_bitbake_var("IMAGE_BOOT_FILES")
 
         if not boot_files:
-            msger.error('No boot files defined, IMAGE_BOOT_FILES unset')
+            raise WicError('No boot files defined, IMAGE_BOOT_FILES unset')
 
-        msger.debug('Boot files: %s' % boot_files)
+        logger.debug('Boot files: %s', boot_files)
 
         # list of tuples (src_name, dst_name)
         deploy_files = []
@@ -94,11 +78,11 @@
             if ';' in src_entry:
                 dst_entry = tuple(src_entry.split(';'))
                 if not dst_entry[0] or not dst_entry[1]:
-                    msger.error('Malformed boot file entry: %s' % (src_entry))
+                    raise WicError('Malformed boot file entry: %s' % src_entry)
             else:
                 dst_entry = (src_entry, src_entry)
 
-            msger.debug('Destination entry: %r' % (dst_entry,))
+            logger.debug('Destination entry: %r', dst_entry)
             deploy_files.append(dst_entry)
 
         for deploy_entry in deploy_files:
@@ -114,27 +98,26 @@
                                     os.path.join(dst,
                                                  os.path.basename(name))
 
-                srcs = glob(os.path.join(bootimg_dir, src))
+                srcs = glob(os.path.join(kernel_dir, src))
 
-                msger.debug('Globbed sources: %s' % (', '.join(srcs)))
+                logger.debug('Globbed sources: %s', ', '.join(srcs))
                 for entry in srcs:
                     entry_dst_name = entry_name_fn(entry)
                     install_task.append((entry,
                                          os.path.join(hdddir,
                                                       entry_dst_name)))
             else:
-                install_task = [(os.path.join(bootimg_dir, src),
+                install_task = [(os.path.join(kernel_dir, src),
                                  os.path.join(hdddir, dst))]
 
             for task in install_task:
                 src_path, dst_path = task
-                msger.debug('Install %s as %s' % (os.path.basename(src_path),
-                                                  dst_path))
+                logger.debug('Install %s as %s',
+                             os.path.basename(src_path), dst_path)
                 install_cmd = "install -m 0644 -D %s %s" \
                               % (src_path, dst_path)
                 exec_cmd(install_cmd)
 
-        msger.debug('Prepare boot partition using rootfs in %s' % (hdddir))
+        logger.debug('Prepare boot partition using rootfs in %s', hdddir)
         part.prepare_rootfs(cr_workdir, oe_builddir, hdddir,
                             native_sysroot)
-
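
The bootimg-partition hunks above keep the IMAGE_BOOT_FILES convention: entries are whitespace-separated, and an optional ';' splits a source name from its destination path. A self-contained sketch of that parsing under the same assumptions (parse_boot_files is a hypothetical helper, not part of the plugin):

    def parse_boot_files(boot_files):
        """Turn 'bzImage u-boot.bin;boot/u-boot.bin' into (src, dst) pairs."""
        deploy_files = []
        for src_entry in boot_files.split():
            if ';' in src_entry:
                dst_entry = tuple(src_entry.split(';'))
                if not dst_entry[0] or not dst_entry[1]:
                    raise ValueError('Malformed boot file entry: %s' % src_entry)
            else:
                # No explicit destination: deploy under the source name.
                dst_entry = (src_entry, src_entry)
            deploy_files.append(dst_entry)
        return deploy_files

    # parse_boot_files("bzImage u-boot.bin;boot/u-boot.bin")
    # -> [('bzImage', 'bzImage'), ('u-boot.bin', 'boot/u-boot.bin')]
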
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
index f204daa..5890c12 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/bootimg-pcbios.py
@@ -24,15 +24,17 @@
 # Tom Zanussi <tom.zanussi (at] linux.intel.com>
 #
 
+import logging
 import os
 
-from wic.utils.errors import ImageError
-from wic import msger
+from wic import WicError
+from wic.engine import get_custom_config
 from wic.utils import runner
-from wic.utils.misc import get_custom_config
 from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd, \
-                              get_bitbake_var, BOOTDD_EXTRA_SPACE
+from wic.utils.misc import (exec_cmd, exec_native_cmd,
+                            get_bitbake_var, BOOTDD_EXTRA_SPACE)
+
+logger = logging.getLogger('wic')
 
 class BootimgPcbiosPlugin(SourcePlugin):
     """
@@ -42,33 +44,45 @@
     name = 'bootimg-pcbios'
 
     @classmethod
+    def _get_bootimg_dir(cls, bootimg_dir, dirname):
+        """
+        Check if dirname exists in the default bootimg_dir or
+        in the wic-tools STAGING_DATADIR.
+        """
+        for result in (bootimg_dir, get_bitbake_var("STAGING_DATADIR", "wic-tools")):
+            if os.path.exists("%s/%s" % (result, dirname)):
+                return result
+
+        raise WicError("Couldn't find correct bootimg_dir, exiting")
+
+    @classmethod
     def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,
                         bootimg_dir, kernel_dir, native_sysroot):
         """
         Called after all partitions have been prepared and assembled into a
         disk image.  In this case, we install the MBR.
         """
+        bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
         mbrfile = "%s/syslinux/" % bootimg_dir
         if creator.ptable_format == 'msdos':
             mbrfile += "mbr.bin"
         elif creator.ptable_format == 'gpt':
             mbrfile += "gptmbr.bin"
         else:
-            msger.error("Unsupported partition table: %s" % creator.ptable_format)
+            raise WicError("Unsupported partition table: %s" %
+                           creator.ptable_format)
 
         if not os.path.exists(mbrfile):
-            msger.error("Couldn't find %s.  If using the -e option, do you "
-                        "have the right MACHINE set in local.conf?  If not, "
-                        "is the bootimg_dir path correct?" % mbrfile)
+            raise WicError("Couldn't find %s.  If using the -e option, do you "
+                           "have the right MACHINE set in local.conf?  If not, "
+                           "is the bootimg_dir path correct?" % mbrfile)
 
         full_path = creator._full_path(workdir, disk_name, "direct")
-        msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
-                    % (disk_name, full_path, disk['min_size']))
+        logger.debug("Installing MBR on disk %s as %s with size %s bytes",
+                     disk_name, full_path, disk.min_size)
 
-        rcode = runner.show(['dd', 'if=%s' % mbrfile,
-                             'of=%s' % full_path, 'conv=notrunc'])
-        if rcode != 0:
-            raise ImageError("Unable to set MBR to %s" % full_path)
+        dd_cmd = "dd if=%s of=%s conv=notrunc" % (mbrfile, full_path)
+        exec_cmd(dd_cmd, native_sysroot)
 
     @classmethod
     def do_configure_partition(cls, part, source_params, creator, cr_workdir,
@@ -90,11 +104,11 @@
             if custom_cfg:
                 # Use a custom configuration for grub
                 syslinux_conf = custom_cfg
-                msger.debug("Using custom configuration file "
-                            "%s for syslinux.cfg" % bootloader.configfile)
+                logger.debug("Using custom configuration file %s "
+                             "for syslinux.cfg", bootloader.configfile)
             else:
-                msger.error("configfile is specified but failed to "
-                            "get it from %s." % bootloader.configfile)
+                raise WicError("configfile is specified but failed to "
+                               "get it from %s." % bootloader.configfile)
 
         if not custom_cfg:
             # Create syslinux configuration using parameters from wks file
@@ -122,8 +136,8 @@
             syslinux_conf += "APPEND label=boot root=%s %s\n" % \
                              (creator.rootdev, bootloader.append)
 
-        msger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg" \
-                    % cr_workdir)
+        logger.debug("Writing syslinux config %s/hdd/boot/syslinux.cfg",
+                     cr_workdir)
         cfg = open("%s/hdd/boot/syslinux.cfg" % cr_workdir, "w")
         cfg.write(syslinux_conf)
         cfg.close()
@@ -137,33 +151,25 @@
         'prepares' the partition to be incorporated into the image.
         In this case, prepare content for legacy bios boot partition.
         """
-        def _has_syslinux(dirname):
-            if dirname:
-                syslinux = "%s/syslinux" % dirname
-                if os.path.exists(syslinux):
-                    return True
-            return False
-
-        if not _has_syslinux(bootimg_dir):
-            bootimg_dir = get_bitbake_var("STAGING_DATADIR")
-            if not bootimg_dir:
-                msger.error("Couldn't find STAGING_DATADIR, exiting\n")
-            if not _has_syslinux(bootimg_dir):
-                msger.error("Please build syslinux first\n")
-            # just so the result notes display it
-            creator.set_bootimg_dir(bootimg_dir)
+        bootimg_dir = cls._get_bootimg_dir(bootimg_dir, 'syslinux')
 
         staging_kernel_dir = kernel_dir
 
         hdddir = "%s/hdd/boot" % cr_workdir
 
-        install_cmd = "install -m 0644 %s/bzImage %s/vmlinuz" \
-            % (staging_kernel_dir, hdddir)
-        exec_cmd(install_cmd)
+        cmds = ("install -m 0644 %s/bzImage %s/vmlinuz" %
+                (staging_kernel_dir, hdddir),
+                "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" %
+                (bootimg_dir, hdddir),
+                "install -m 0644 %s/syslinux/vesamenu.c32 %s/vesamenu.c32" %
+                (bootimg_dir, hdddir),
+                "install -m 444 %s/syslinux/libcom32.c32 %s/libcom32.c32" %
+                (bootimg_dir, hdddir),
+                "install -m 444 %s/syslinux/libutil.c32 %s/libutil.c32" %
+                (bootimg_dir, hdddir))
 
-        install_cmd = "install -m 444 %s/syslinux/ldlinux.sys %s/ldlinux.sys" \
-            % (bootimg_dir, hdddir)
-        exec_cmd(install_cmd)
+        for install_cmd in cmds:
+            exec_cmd(install_cmd)
 
         du_cmd = "du -bks %s" % hdddir
         out = exec_cmd(du_cmd)
@@ -176,8 +182,8 @@
 
         blocks += extra_blocks
 
-        msger.debug("Added %d extra blocks to %s to get to %d total blocks" % \
-                    (extra_blocks, part.mountpoint, blocks))
+        logger.debug("Added %d extra blocks to %s to get to %d total blocks",
+                     extra_blocks, part.mountpoint, blocks)
 
         # dosfs image, created by mkdosfs
         bootimg = "%s/boot.img" % cr_workdir
@@ -198,7 +204,5 @@
         out = exec_cmd(du_cmd)
         bootimg_size = out.split()[0]
 
-        part.size = int(out.split()[0])
+        part.size = int(bootimg_size)
         part.source_file = bootimg
-
-
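
The new _get_bootimg_dir() helper in bootimg-pcbios reduces the old _has_syslinux() dance to a first-match scan over candidate directories. A condensed sketch of the same pattern, where candidates stands in for the (bootimg_dir, wic-tools STAGING_DATADIR) pair:

    import os

    def find_artifact_dir(candidates, dirname):
        for result in candidates:
            if result and os.path.exists(os.path.join(result, dirname)):
                return result
        raise RuntimeError("Couldn't find correct bootimg_dir, exiting")
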
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/fsimage.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/fsimage.py
deleted file mode 100644
index f894e89..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/fsimage.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-
-import os
-
-from wic import msger
-from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import get_bitbake_var
-
-class FSImagePlugin(SourcePlugin):
-    """
-    Add an already existing filesystem image to the partition layout.
-    """
-
-    name = 'fsimage'
-
-    @classmethod
-    def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
-                        bootimg_dir, kernel_dir, native_sysroot):
-        """
-        Called after all partitions have been prepared and assembled into a
-        disk image. Do nothing.
-        """
-        pass
-
-    @classmethod
-    def do_configure_partition(cls, part, source_params, cr, cr_workdir,
-                               oe_builddir, bootimg_dir, kernel_dir,
-                               native_sysroot):
-        """
-        Called before do_prepare_partition(). Possibly prepare
-        configuration files of some sort.
-        """
-        pass
-
-    @classmethod
-    def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
-                             oe_builddir, bootimg_dir, kernel_dir,
-                             rootfs_dir, native_sysroot):
-        """
-        Called to do the actual content population for a partition i.e. it
-        'prepares' the partition to be incorporated into the image.
-        """
-        if not bootimg_dir:
-            bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
-            if not bootimg_dir:
-                msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
-
-        msger.debug('Bootimg dir: %s' % bootimg_dir)
-
-        if 'file' not in source_params:
-            msger.error("No file specified\n")
-            return
-
-        src = os.path.join(bootimg_dir, source_params['file'])
-
-
-        msger.debug('Preparing partition using image %s' % (src))
-        part.prepare_rootfs_from_fs_image(cr_workdir, src, "")
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
index 3858fd4..1ceba62 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/isoimage-isohybrid.py
@@ -20,15 +20,18 @@
 # AUTHORS
 # Mihaly Varga <mihaly.varga (at] ni.com>
 
+import glob
+import logging
 import os
 import re
 import shutil
-import glob
 
-from wic import msger
+from wic import WicError
+from wic.engine import get_custom_config
 from wic.pluginbase import SourcePlugin
-from wic.utils.misc import get_custom_config
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+from wic.utils.misc import exec_cmd, exec_native_cmd, get_bitbake_var
+
+logger = logging.getLogger('wic')
 
 class IsoImagePlugin(SourcePlugin):
     """
@@ -85,8 +88,9 @@
         syslinux_conf += "APPEND initrd=/initrd LABEL=boot %s\n" \
                              % bootloader.append
 
-        msger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg" \
-                    % cr_workdir)
+        logger.debug("Writing syslinux config %s/ISO/isolinux/isolinux.cfg",
+                     cr_workdir)
+
         with open("%s/ISO/isolinux/isolinux.cfg" % cr_workdir, "w") as cfg:
             cfg.write(syslinux_conf)
 
@@ -99,11 +103,11 @@
         if configfile:
             grubefi_conf = get_custom_config(configfile)
             if grubefi_conf:
-                msger.debug("Using custom configuration file "
-                        "%s for grub.cfg" % configfile)
+                logger.debug("Using custom configuration file %s for grub.cfg",
+                             configfile)
             else:
-                msger.error("configfile is specified but failed to "
-                        "get it from %s." % configfile)
+                raise WicError("configfile is specified "
+                               "but failed to get it from %s", configfile)
         else:
             splash = os.path.join(cr_workdir, "EFI/boot/splash.jpg")
             if os.path.exists(splash):
@@ -133,8 +137,8 @@
             if splashline:
                 grubefi_conf += "%s\n" % splashline
 
-        msger.debug("Writing grubefi config %s/EFI/BOOT/grub.cfg" \
-                        % cr_workdir)
+        logger.debug("Writing grubefi config %s/EFI/BOOT/grub.cfg", cr_workdir)
+
         with open("%s/EFI/BOOT/grub.cfg" % cr_workdir, "w") as cfg:
             cfg.write(grubefi_conf)
 
@@ -144,25 +148,25 @@
         Create path for initramfs image
         """
 
-        initrd = get_bitbake_var("INITRD")
+        initrd = get_bitbake_var("INITRD_LIVE") or get_bitbake_var("INITRD")
         if not initrd:
             initrd_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
             if not initrd_dir:
-                msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting.\n")
+                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting.")
 
             image_name = get_bitbake_var("IMAGE_BASENAME")
             if not image_name:
-                msger.error("Couldn't find IMAGE_BASENAME, exiting.\n")
+                raise WicError("Couldn't find IMAGE_BASENAME, exiting.")
 
             image_type = get_bitbake_var("INITRAMFS_FSTYPES")
             if not image_type:
-                msger.error("Couldn't find INITRAMFS_FSTYPES, exiting.\n")
+                raise WicError("Couldn't find INITRAMFS_FSTYPES, exiting.")
 
-            machine_arch = get_bitbake_var("MACHINE_ARCH")
-            if not machine_arch:
-                msger.error("Couldn't find MACHINE_ARCH, exiting.\n")
+            target_arch = get_bitbake_var("TRANSLATED_TARGET_ARCH")
+            if not target_arch:
+                raise WicError("Couldn't find TRANSLATED_TARGET_ARCH, exiting.")
 
-            initrd = glob.glob('%s/%s*%s.%s' % (initrd_dir, image_name, machine_arch, image_type))[0]
+            initrd = glob.glob('%s/%s*%s.%s' % (initrd_dir, image_name, target_arch, image_type))[0]
 
         if not os.path.exists(initrd):
             # Create initrd from rootfs directory
@@ -183,7 +187,7 @@
                 os.symlink(os.readlink("%s/sbin/init" % rootfs_dir), \
                             "%s/init" % initrd_dir)
             else:
-                msger.error("Couldn't find or build initrd, exiting.\n")
+                raise WicError("Couldn't find or build initrd, exiting.")
 
             exec_cmd("cd %s && find . | cpio -o -H newc -R +0:+0 >./initrd.cpio " \
                     % initrd_dir, as_shell=True)
@@ -194,55 +198,6 @@
         return initrd
 
     @classmethod
-    def do_stage_partition(cls, part, source_params, creator, cr_workdir,
-                           oe_builddir, bootimg_dir, kernel_dir,
-                           native_sysroot):
-        """
-        Special content staging called before do_prepare_partition().
-        It cheks if all necessary tools are available, if not
-        tries to instal them.
-        """
-        # Make sure parted is available in native sysroot
-        if not os.path.isfile("%s/usr/sbin/parted" % native_sysroot):
-            msger.info("Building parted-native...\n")
-            exec_cmd("bitbake parted-native")
-
-        # Make sure mkfs.ext2/3/4 is available in native sysroot
-        if not os.path.isfile("%s/sbin/mkfs.ext2" % native_sysroot):
-            msger.info("Building e2fsprogs-native...\n")
-            exec_cmd("bitbake e2fsprogs-native")
-
-        # Make sure syslinux is available in sysroot and in native sysroot
-        syslinux_dir = get_bitbake_var("STAGING_DATADIR")
-        if not syslinux_dir:
-            msger.error("Couldn't find STAGING_DATADIR, exiting.\n")
-        if not os.path.exists("%s/syslinux" % syslinux_dir):
-            msger.info("Building syslinux...\n")
-            exec_cmd("bitbake syslinux")
-        if not os.path.exists("%s/syslinux" % syslinux_dir):
-            msger.error("Please build syslinux first\n")
-
-        # Make sure syslinux is available in native sysroot
-        if not os.path.exists("%s/usr/bin/syslinux" % native_sysroot):
-            msger.info("Building syslinux-native...\n")
-            exec_cmd("bitbake syslinux-native")
-
-        #Make sure mkisofs is available in native sysroot
-        if not os.path.isfile("%s/usr/bin/mkisofs" % native_sysroot):
-            msger.info("Building cdrtools-native...\n")
-            exec_cmd("bitbake cdrtools-native")
-
-        # Make sure mkfs.vfat is available in native sysroot
-        if not os.path.isfile("%s/sbin/mkfs.vfat" % native_sysroot):
-            msger.info("Building dosfstools-native...\n")
-            exec_cmd("bitbake dosfstools-native")
-
-        # Make sure mtools is available in native sysroot
-        if not os.path.isfile("%s/usr/bin/mcopy" % native_sysroot):
-            msger.info("Building mtools-native...\n")
-            exec_cmd("bitbake mtools-native")
-
-    @classmethod
     def do_configure_partition(cls, part, source_params, creator, cr_workdir,
                                oe_builddir, bootimg_dir, kernel_dir,
                                native_sysroot):
@@ -258,11 +213,11 @@
         exec_cmd(install_cmd)
 
         # Overwrite the name of the created image
-        msger.debug("%s" % source_params)
+        logger.debug(source_params)
         if 'image_name' in source_params and \
                     source_params['image_name'].strip():
             creator.name = source_params['image_name'].strip()
-            msger.debug("The name of the image is: %s" % creator.name)
+            logger.debug("The name of the image is: %s", creator.name)
 
     @classmethod
     def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
@@ -278,7 +233,7 @@
 
         if part.rootfs_dir is None:
             if not 'ROOTFS_DIR' in rootfs_dir:
-                msger.error("Couldn't find --rootfs-dir, exiting.\n")
+                raise WicError("Couldn't find --rootfs-dir, exiting.")
             rootfs_dir = rootfs_dir['ROOTFS_DIR']
         else:
             if part.rootfs_dir in rootfs_dir:
@@ -286,24 +241,21 @@
             elif part.rootfs_dir:
                 rootfs_dir = part.rootfs_dir
             else:
-                msg = "Couldn't find --rootfs-dir=%s connection "
-                msg += "or it is not a valid path, exiting.\n"
-                msger.error(msg % part.rootfs_dir)
+                raise WicError("Couldn't find --rootfs-dir=%s connection "
+                               "or it is not a valid path, exiting." %
+                               part.rootfs_dir)
 
         if not os.path.isdir(rootfs_dir):
             rootfs_dir = get_bitbake_var("IMAGE_ROOTFS")
         if not os.path.isdir(rootfs_dir):
-            msger.error("Couldn't find IMAGE_ROOTFS, exiting.\n")
+            raise WicError("Couldn't find IMAGE_ROOTFS, exiting.")
 
         part.rootfs_dir = rootfs_dir
 
         # Prepare rootfs.img
-        hdd_dir = get_bitbake_var("HDDDIR")
+        deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
         img_iso_dir = get_bitbake_var("ISODIR")
-
-        rootfs_img = "%s/rootfs.img" % hdd_dir
-        if not os.path.isfile(rootfs_img):
-            rootfs_img = "%s/rootfs.img" % img_iso_dir
+        rootfs_img = "%s/rootfs.img" % img_iso_dir
         if not os.path.isfile(rootfs_img):
             # check if rootfs.img is in deploydir
             deploy_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
@@ -331,15 +283,22 @@
         if os.path.isfile(part.source_file):
             os.remove(part.source_file)
 
-        # Prepare initial ramdisk
-        initrd = "%s/initrd" % hdd_dir
-        if not os.path.isfile(initrd):
-            initrd = "%s/initrd" % img_iso_dir
-        if not os.path.isfile(initrd):
-            initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
+        # Support using an initrd other than the default
+        if source_params.get('initrd'):
+            initrd = source_params['initrd']
+            if not deploy_dir:
+                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
+            cp_cmd = "cp %s/%s %s" % (deploy_dir, initrd, cr_workdir)
+            exec_cmd(cp_cmd)
+        else:
+            # Prepare initial ramdisk
+            initrd = "%s/initrd" % deploy_dir
+            if not os.path.isfile(initrd):
+                initrd = "%s/initrd" % img_iso_dir
+            if not os.path.isfile(initrd):
+                initrd = cls._build_initramfs_path(rootfs_dir, cr_workdir)
 
-        install_cmd = "install -m 0644 %s %s/initrd" \
-            % (initrd, isodir)
+        install_cmd = "install -m 0644 %s %s/initrd" % (initrd, isodir)
         exec_cmd(install_cmd)
 
         # Remove the temporary file created by _build_initramfs_path function
@@ -348,7 +307,7 @@
 
         # Install bzImage
         install_cmd = "install -m 0644 %s/bzImage %s/bzImage" % \
-            (kernel_dir, isodir)
+                      (kernel_dir, isodir)
         exec_cmd(install_cmd)
 
         #Create bootloader for efi boot
@@ -371,7 +330,7 @@
                 # didn't contain it
                 target_arch = get_bitbake_var("TARGET_SYS")
                 if not target_arch:
-                    msger.error("Coludn't find target architecture\n")
+                    raise WicError("Coludn't find target architecture")
 
                 if re.match("x86_64", target_arch):
                     grub_target = 'x86_64-efi'
@@ -380,21 +339,18 @@
                     grub_target = 'i386-efi'
                     grub_image = "bootia32.efi"
                 else:
-                    msger.error("grub-efi is incompatible with target %s\n" \
-                                % target_arch)
+                    raise WicError("grub-efi is incompatible with target %s" %
+                                   target_arch)
 
                 if not os.path.isfile("%s/EFI/BOOT/%s" \
                                 % (bootimg_dir, grub_image)):
-                    grub_path = get_bitbake_var("STAGING_LIBDIR")
+                    grub_path = get_bitbake_var("STAGING_LIBDIR", "wic-tools")
                     if not grub_path:
-                        msger.error("Couldn't find STAGING_LIBDIR, exiting.\n")
+                        raise WicError("Couldn't find STAGING_LIBDIR, exiting.")
 
                     grub_core = "%s/grub/%s" % (grub_path, grub_target)
                     if not os.path.exists(grub_core):
-                        msger.info("Building grub-efi...\n")
-                        exec_cmd("bitbake grub-efi")
-                    if not os.path.exists(grub_core):
-                        msger.error("Please build grub-efi first\n")
+                        raise WicError("Please build grub-efi first")
 
                     grub_cmd = "grub-mkimage -p '/EFI/BOOT' "
                     grub_cmd += "-d %s "  % grub_core
@@ -410,11 +366,10 @@
                     exec_native_cmd(grub_cmd, native_sysroot)
 
             else:
-                # TODO: insert gummiboot stuff
-                msger.error("unrecognized bootimg-efi loader: %s" \
-                            % source_params['loader'])
+                raise WicError("unrecognized bootimg-efi loader: %s" %
+                               source_params['loader'])
         except KeyError:
-            msger.error("bootimg-efi requires a loader, none specified")
+            raise WicError("bootimg-efi requires a loader, none specified")
 
         if os.path.exists("%s/EFI/BOOT" % isodir):
             shutil.rmtree("%s/EFI/BOOT" % isodir)
@@ -437,9 +392,8 @@
             blocks = int(out.split()[0])
             # Add some extra space for file system overhead
             blocks += 100
-            msg = "Added 100 extra blocks to %s to get to %d total blocks" \
-                    % (part.mountpoint, blocks)
-            msger.debug(msg)
+            logger.debug("Added 100 extra blocks to %s to get to %d "
+                         "total blocks", part.mountpoint, blocks)
 
             # dosfs image for EFI boot
             bootimg = "%s/efi.img" % isodir
@@ -459,9 +413,9 @@
             exec_cmd(chmod_cmd)
 
         # Prepare files for legacy boot
-        syslinux_dir = get_bitbake_var("STAGING_DATADIR")
+        syslinux_dir = get_bitbake_var("STAGING_DATADIR", "wic-tools")
         if not syslinux_dir:
-            msger.error("Couldn't find STAGING_DATADIR, exiting.\n")
+            raise WicError("Couldn't find STAGING_DATADIR, exiting.")
 
         if os.path.exists("%s/isolinux" % isodir):
             shutil.rmtree("%s/isolinux" % isodir)
@@ -501,7 +455,7 @@
         mkisofs_cmd += "-eltorito-platform 0xEF -eltorito-boot %s " % efi_img
         mkisofs_cmd += "-no-emul-boot %s " % isodir
 
-        msger.debug("running command: %s" % mkisofs_cmd)
+        logger.debug("running command: %s", mkisofs_cmd)
         exec_native_cmd(mkisofs_cmd, native_sysroot)
 
         shutil.rmtree(isodir)
@@ -522,23 +476,19 @@
         utility for booting via BIOS from disk storage devices.
         """
 
+        iso_img = "%s.p1" % disk.path
         full_path = creator._full_path(workdir, disk_name, "direct")
-        iso_img = "%s.p1" % full_path
         full_path_iso = creator._full_path(workdir, disk_name, "iso")
 
         isohybrid_cmd = "isohybrid -u %s" % iso_img
-        msger.debug("running command: %s" % \
-                    isohybrid_cmd)
+        logger.debug("running command: %s", isohybrid_cmd)
         exec_native_cmd(isohybrid_cmd, native_sysroot)
 
         # Replace the image created by direct plugin with the one created by
         # mkisofs command. This is necessary because the iso image created by
         # mkisofs has a very specific MBR in the system area of the ISO
         # image, and the direct plugin adds and configures another MBR.
-        msger.debug("Replaceing the image created by direct plugin\n")
-        os.remove(full_path)
+        logger.debug("Replaceing the image created by direct plugin\n")
+        os.remove(disk.path)
         shutil.copy2(iso_img, full_path_iso)
         shutil.copy2(full_path_iso, full_path)
-
-        # Remove temporary ISO file
-        os.remove(iso_img)
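
In the grub-efi path above, the target triple decides both the grub-mkimage format and the EFI binary name. A standalone sketch of that selection; the x86_64 image name "bootx64.efi" and the 'i.86' pattern are assumptions inferred from the i386 branch visible in the hunk:

    import re

    def grub_target_for(target_arch):
        if re.match("x86_64", target_arch):
            return "x86_64-efi", "bootx64.efi"   # image name assumed
        if re.match("i.86", target_arch):        # pattern assumed
            return "i386-efi", "bootia32.efi"
        raise RuntimeError("grub-efi is incompatible with target %s" %
                           target_arch)
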
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py
index 618dd44..e1c4f5e 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rawcopy.py
@@ -15,13 +15,16 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 #
 
+import logging
 import os
 
-from wic import msger
+from wic import WicError
 from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import exec_cmd, get_bitbake_var
+from wic.utils.misc import exec_cmd, get_bitbake_var
 from wic.filemap import sparse_copy
 
+logger = logging.getLogger('wic')
+
 class RawCopyPlugin(SourcePlugin):
     """
     Populate partition content from raw image file.
@@ -30,25 +33,6 @@
     name = 'rawcopy'
 
     @classmethod
-    def do_install_disk(cls, disk, disk_name, cr, workdir, oe_builddir,
-                        bootimg_dir, kernel_dir, native_sysroot):
-        """
-        Called after all partitions have been prepared and assembled into a
-        disk image. Do nothing.
-        """
-        pass
-
-    @classmethod
-    def do_configure_partition(cls, part, source_params, cr, cr_workdir,
-                               oe_builddir, bootimg_dir, kernel_dir,
-                               native_sysroot):
-        """
-        Called before do_prepare_partition(). Possibly prepare
-        configuration files of some sort.
-        """
-        pass
-
-    @classmethod
     def do_prepare_partition(cls, part, source_params, cr, cr_workdir,
                              oe_builddir, bootimg_dir, kernel_dir,
                              rootfs_dir, native_sysroot):
@@ -56,18 +40,17 @@
         Called to do the actual content population for a partition i.e. it
         'prepares' the partition to be incorporated into the image.
         """
-        if not bootimg_dir:
-            bootimg_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
-            if not bootimg_dir:
-                msger.error("Couldn't find DEPLOY_DIR_IMAGE, exiting\n")
+        if not kernel_dir:
+            kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE")
+            if not kernel_dir:
+                raise WicError("Couldn't find DEPLOY_DIR_IMAGE, exiting")
 
-        msger.debug('Bootimg dir: %s' % bootimg_dir)
+        logger.debug('Kernel dir: %s', kernel_dir)
 
         if 'file' not in source_params:
-            msger.error("No file specified\n")
-            return
+            raise WicError("No file specified")
 
-        src = os.path.join(bootimg_dir, source_params['file'])
+        src = os.path.join(kernel_dir, source_params['file'])
         dst = os.path.join(cr_workdir, "%s.%s" % (source_params['file'], part.lineno))
 
         if 'skip' in source_params:
@@ -84,4 +67,3 @@
             part.size = filesize
 
         part.source_file = dst
-
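
Conceptually, rawcopy places a source image into the partition work file, optionally skipping a leading header when 'skip' is given. The plugin relies on wic.filemap.sparse_copy to keep the result sparse; the chunked plain-Python version below only illustrates the data flow:

    def raw_copy(src, dst, skip=0, chunk=1 << 20):
        with open(src, 'rb') as fsrc, open(dst, 'wb') as fdst:
            fsrc.seek(skip)          # drop 'skip' bytes of header, if any
            while True:
                data = fsrc.read(chunk)
                if not data:
                    break
                fdst.write(data)
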
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs.py
index 425da8b..f2e2ca8 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs.py
@@ -25,11 +25,17 @@
 # Joao Henrique Ferreira de Freitas <joaohf (at] gmail.com>
 #
 
+import logging
 import os
+import shutil
 
-from wic import msger
+from oe.path import copyhardlinktree
+
+from wic import WicError
 from wic.pluginbase import SourcePlugin
-from wic.utils.oe.misc import get_bitbake_var
+from wic.utils.misc import get_bitbake_var, exec_cmd
+
+logger = logging.getLogger('wic')
 
 class RootfsPlugin(SourcePlugin):
     """
@@ -45,10 +51,9 @@
 
         image_rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
         if not os.path.isdir(image_rootfs_dir):
-            msg = "No valid artifact IMAGE_ROOTFS from image named"
-            msg += " %s has been found at %s, exiting.\n" % \
-                (rootfs_dir, image_rootfs_dir)
-            msger.error(msg)
+            raise WicError("No valid artifact IMAGE_ROOTFS from image "
+                           "named %s has been found at %s, exiting." %
+                           (rootfs_dir, image_rootfs_dir))
 
         return image_rootfs_dir
 
@@ -63,8 +68,8 @@
         """
         if part.rootfs_dir is None:
             if not 'ROOTFS_DIR' in krootfs_dir:
-                msg = "Couldn't find --rootfs-dir, exiting"
-                msger.error(msg)
+                raise WicError("Couldn't find --rootfs-dir, exiting")
+
             rootfs_dir = krootfs_dir['ROOTFS_DIR']
         else:
             if part.rootfs_dir in krootfs_dir:
@@ -72,12 +77,49 @@
             elif part.rootfs_dir:
                 rootfs_dir = part.rootfs_dir
             else:
-                msg = "Couldn't find --rootfs-dir=%s connection"
-                msg += " or it is not a valid path, exiting"
-                msger.error(msg % part.rootfs_dir)
+                raise WicError("Couldn't find --rootfs-dir=%s connection or "
+                               "it is not a valid path, exiting" % part.rootfs_dir)
 
         real_rootfs_dir = cls.__get_rootfs_dir(rootfs_dir)
 
-        part.rootfs_dir = real_rootfs_dir
-        part.prepare_rootfs(cr_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
+        # Handle excluded paths.
+        if part.exclude_path is not None:
+            # We need a new rootfs directory we can delete files from. Copy to
+            # workdir.
+            new_rootfs = os.path.realpath(os.path.join(cr_workdir, "rootfs"))
 
+            if os.path.lexists(new_rootfs):
+                shutil.rmtree(new_rootfs)
+
+            copyhardlinktree(real_rootfs_dir, new_rootfs)
+
+            real_rootfs_dir = new_rootfs
+
+            for orig_path in part.exclude_path:
+                path = orig_path
+                if os.path.isabs(path):
+                    msger.error("Must be relative: --exclude-path=%s" % orig_path)
+
+                full_path = os.path.realpath(os.path.join(new_rootfs, path))
+
+                # Disallow climbing outside of parent directory using '..',
+                # because doing so could be quite disastrous (we will delete the
+                # directory).
+                if not full_path.startswith(new_rootfs):
+                    msger.error("'%s' points to a path outside the rootfs" % orig_path)
+
+                if path.endswith(os.sep):
+                    # Delete content only.
+                    for entry in os.listdir(full_path):
+                        full_entry = os.path.join(full_path, entry)
+                        if os.path.isdir(full_entry) and not os.path.islink(full_entry):
+                            shutil.rmtree(full_entry)
+                        else:
+                            os.remove(full_entry)
+                else:
+                    # Delete whole directory.
+                    shutil.rmtree(full_path)
+
+        part.rootfs_dir = real_rootfs_dir
+        part.prepare_rootfs(cr_workdir, oe_builddir,
+                            real_rootfs_dir, native_sysroot)
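
The --exclude-path support added to the rootfs plugin deletes paths from a hardlinked copy of the rootfs, so it validates each path twice before removing anything: the path must be relative, and its realpath must stay inside the copied tree. A condensed sketch of that guard:

    import os

    def checked_exclude_path(new_rootfs, path):
        if os.path.isabs(path):
            raise RuntimeError("Must be relative: --exclude-path=%s" % path)
        full_path = os.path.realpath(os.path.join(new_rootfs, path))
        # Refuse '..' escapes: deleting outside the copy would be disastrous.
        if not full_path.startswith(new_rootfs):
            raise RuntimeError("'%s' points to a path outside the rootfs" % path)
        return full_path
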
diff --git a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py b/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py
deleted file mode 100644
index 3d60e6f..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/plugins/source/rootfs_pcbios_ext.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# This program is free software; you can distribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for mo details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# AUTHOR
-# Adrian Freihofer <adrian.freihofer (at] neratec.com>
-#
-
-import os
-from wic import msger
-from wic.utils import syslinux
-from wic.utils import runner
-from wic.utils.oe import misc
-from wic.utils.errors import ImageError
-from wic.pluginbase import SourcePlugin
-
-
-# pylint: disable=no-init
-class RootfsPlugin(SourcePlugin):
-    """
-    Create root partition and install syslinux bootloader
-
-    This plugin creates a disk image containing a bootable root partition with
-    syslinux installed. The filesystem is ext2/3/4, no extra boot partition is
-    required.
-
-    Example kickstart file:
-    part / --source rootfs-pcbios-ext --ondisk sda --fstype=ext4 --label rootfs --align 1024
-    bootloader --source rootfs-pcbios-ext --timeout=0 --append="rootwait rootfstype=ext4"
-
-    The first line generates a root file system including a syslinux.cfg file
-    The "--source rootfs-pcbios-ext" in the second line triggers the installation
-    of ldlinux.sys into the image.
-    """
-
-    name = 'rootfs-pcbios-ext'
-
-    @staticmethod
-    def _get_rootfs_dir(rootfs_dir):
-        """
-        Find rootfs pseudo dir
-
-        If rootfs_dir is a directory consider it as rootfs directory.
-        Otherwise ask bitbake about the IMAGE_ROOTFS directory.
-        """
-        if os.path.isdir(rootfs_dir):
-            return rootfs_dir
-
-        image_rootfs_dir = misc.get_bitbake_var("IMAGE_ROOTFS", rootfs_dir)
-        if not os.path.isdir(image_rootfs_dir):
-            msg = "No valid artifact IMAGE_ROOTFS from image named"
-            msg += " %s has been found at %s, exiting.\n" % \
-                (rootfs_dir, image_rootfs_dir)
-            msger.error(msg)
-
-        return image_rootfs_dir
-
-    # pylint: disable=unused-argument
-    @classmethod
-    def do_configure_partition(cls, part, source_params, image_creator,
-                               image_creator_workdir, oe_builddir, bootimg_dir,
-                               kernel_dir, native_sysroot):
-        """
-        Creates syslinux config in rootfs directory
-
-        Called before do_prepare_partition()
-        """
-        bootloader = image_creator.ks.bootloader
-
-        syslinux_conf = ""
-        syslinux_conf += "PROMPT 0\n"
-
-        syslinux_conf += "TIMEOUT " + str(bootloader.timeout) + "\n"
-        syslinux_conf += "ALLOWOPTIONS 1\n"
-
-        # Derive SERIAL... line from from kernel boot parameters
-        syslinux_conf += syslinux.serial_console_form_kargs(options) + "\n"
-
-        syslinux_conf += "DEFAULT linux\n"
-        syslinux_conf += "LABEL linux\n"
-        syslinux_conf += "  KERNEL /boot/bzImage\n"
-
-        syslinux_conf += "  APPEND label=boot root=%s %s\n" % \
-                             (image_creator.rootdev, bootloader.append)
-
-        syslinux_cfg = os.path.join(image_creator.rootfs_dir['ROOTFS_DIR'], "boot", "syslinux.cfg")
-        msger.debug("Writing syslinux config %s" % syslinux_cfg)
-        with open(syslinux_cfg, "w") as cfg:
-            cfg.write(syslinux_conf)
-
-    @classmethod
-    def do_prepare_partition(cls, part, source_params, image_creator,
-                             image_creator_workdir, oe_builddir, bootimg_dir,
-                             kernel_dir, krootfs_dir, native_sysroot):
-        """
-        Creates partition out of rootfs directory
-
-        Prepare content for a rootfs partition i.e. create a partition
-        and fill it from a /rootfs dir.
-        Install syslinux bootloader into root partition image file
-        """
-        def is_exe(exepath):
-            """Verify exepath is an executable file"""
-            return os.path.isfile(exepath) and os.access(exepath, os.X_OK)
-
-        # Make sure syslinux-nomtools is available in native sysroot or fail
-        native_syslinux_nomtools = os.path.join(native_sysroot, "usr/bin/syslinux-nomtools")
-        if not is_exe(native_syslinux_nomtools):
-            msger.info("building syslinux-native...")
-            misc.exec_cmd("bitbake syslinux-native")
-        if not is_exe(native_syslinux_nomtools):
-            msger.error("Couldn't find syslinux-nomtools (%s), exiting\n" %
-                        native_syslinux_nomtools)
-
-        if part.rootfs is None:
-            if 'ROOTFS_DIR' not in krootfs_dir:
-                msger.error("Couldn't find --rootfs-dir, exiting")
-            rootfs_dir = krootfs_dir['ROOTFS_DIR']
-        else:
-            if part.rootfs in krootfs_dir:
-                rootfs_dir = krootfs_dir[part.rootfs]
-            elif part.rootfs:
-                rootfs_dir = part.rootfs
-            else:
-                msg = "Couldn't find --rootfs-dir=%s connection"
-                msg += " or it is not a valid path, exiting"
-                msger.error(msg % part.rootfs)
-
-        real_rootfs_dir = cls._get_rootfs_dir(rootfs_dir)
-
-        part.rootfs_dir = real_rootfs_dir
-        part.prepare_rootfs(image_creator_workdir, oe_builddir, real_rootfs_dir, native_sysroot)
-
-        # install syslinux into rootfs partition
-        syslinux_cmd = "syslinux-nomtools -d /boot -i %s" % part.source_file
-        misc.exec_native_cmd(syslinux_cmd, native_sysroot)
-
-    @classmethod
-    def do_install_disk(cls, disk, disk_name, image_creator, workdir, oe_builddir,
-                        bootimg_dir, kernel_dir, native_sysroot):
-        """
-        Assemble partitions to disk image
-
-        Called after all partitions have been prepared and assembled into a
-        disk image. In this case, we install the MBR.
-        """
-        mbrfile = os.path.join(native_sysroot, "usr/share/syslinux/")
-        if image_creator.ptable_format == 'msdos':
-            mbrfile += "mbr.bin"
-        elif image_creator.ptable_format == 'gpt':
-            mbrfile += "gptmbr.bin"
-        else:
-            msger.error("Unsupported partition table: %s" % \
-                        image_creator.ptable_format)
-
-        if not os.path.exists(mbrfile):
-            msger.error("Couldn't find %s. Has syslinux-native been baked?" % mbrfile)
-
-        full_path = disk['disk'].device
-        msger.debug("Installing MBR on disk %s as %s with size %s bytes" \
-                    % (disk_name, full_path, disk['min_size']))
-
-        ret_code = runner.show(['dd', 'if=%s' % mbrfile, 'of=%s' % full_path, 'conv=notrunc'])
-        if ret_code != 0:
-            raise ImageError("Unable to set MBR to %s" % full_path)
diff --git a/import-layers/yocto-poky/scripts/lib/wic/test b/import-layers/yocto-poky/scripts/lib/wic/test
deleted file mode 100644
index 9daeafb..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/test
+++ /dev/null
@@ -1 +0,0 @@
-test
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/errors.py b/import-layers/yocto-poky/scripts/lib/wic/utils/errors.py
deleted file mode 100644
index d1b514d..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/errors.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2007 Red Hat, Inc.
-# Copyright (c) 2011 Intel, Inc.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-class WicError(Exception):
-    pass
-
-class CreatorError(WicError):
-    pass
-
-class Usage(WicError):
-    pass
-
-class ImageError(WicError):
-    pass
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/misc.py b/import-layers/yocto-poky/scripts/lib/wic/utils/misc.py
index 1415ae9..37e0ad6 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/misc.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/utils/misc.py
@@ -1,95 +1,230 @@
-#!/usr/bin/env python -tt
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
 #
-# Copyright (c) 2010, 2011 Intel Inc.
+# Copyright (c) 2013, Intel Corporation.
+# All rights reserved.
 #
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
 #
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION
+# This module provides a place to collect various wic-related utils
+# for the OpenEmbedded Image Tools.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+"""Miscellaneous functions."""
 
+import logging
 import os
-import time
-import wic.engine
+import re
 
-def build_name(kscfg, release=None, prefix=None, suffix=None):
-    """Construct and return an image name string.
+from collections import defaultdict
+from distutils import spawn
 
-    This is a utility function to help create sensible name and fslabel
-    strings. The name is constructed using the sans-prefix-and-extension
-    kickstart filename and the supplied prefix and suffix.
+from wic import WicError
+from wic.utils import runner
 
-    kscfg -- a path to a kickstart file
-    release --  a replacement to suffix for image release
-    prefix -- a prefix to prepend to the name; defaults to None, which causes
-              no prefix to be used
-    suffix -- a suffix to append to the name; defaults to None, which causes
-              a YYYYMMDDHHMM suffix to be used
+logger = logging.getLogger('wic')
 
-    Note, if maxlen is less then the len(suffix), you get to keep both pieces.
+# executable -> recipe pairs for exec_native_cmd
+NATIVE_RECIPES = {"bmaptool": "bmap-tools",
+                  "grub-mkimage": "grub-efi",
+                  "isohybrid": "syslinux",
+                  "mcopy": "mtools",
+                  "mkdosfs": "dosfstools",
+                  "mkisofs": "cdrtools",
+                  "mkfs.btrfs": "btrfs-tools",
+                  "mkfs.ext2": "e2fsprogs",
+                  "mkfs.ext3": "e2fsprogs",
+                  "mkfs.ext4": "e2fsprogs",
+                  "mkfs.vfat": "dosfstools",
+                  "mksquashfs": "squashfs-tools",
+                  "mkswap": "util-linux",
+                  "mmd": "syslinux",
+                  "parted": "parted",
+                  "sfdisk": "util-linux",
+                  "sgdisk": "gptfdisk",
+                  "syslinux": "syslinux"
+                 }
 
+def _exec_cmd(cmd_and_args, as_shell=False):
     """
-    name = os.path.basename(kscfg)
-    idx = name.rfind('.')
-    if idx >= 0:
-        name = name[:idx]
+    Execute command, catching stderr, stdout
 
-    if release is not None:
-        suffix = ""
-    if prefix is None:
-        prefix = ""
-    if suffix is None:
-        suffix = time.strftime("%Y%m%d%H%M")
-
-    if name.startswith(prefix):
-        name = name[len(prefix):]
-
-    prefix = "%s-" % prefix if prefix else ""
-    suffix = "-%s" % suffix if suffix else ""
-
-    ret = prefix + name + suffix
-
-    return ret
-
-def find_canned(scripts_path, file_name):
+    Need to execute as_shell if the command uses wildcards
     """
-    Find a file either by its path or by name in the canned files dir.
+    logger.debug("_exec_cmd: %s", cmd_and_args)
+    args = cmd_and_args.split()
+    logger.debug(args)
 
-    Return None if not found
+    if as_shell:
+        ret, out = runner.runtool(cmd_and_args)
+    else:
+        ret, out = runner.runtool(args)
+    out = out.strip()
+    if ret != 0:
+        raise WicError("_exec_cmd: %s returned '%s' instead of 0\noutput: %s" % \
+                       (cmd_and_args, ret, out))
+
+    logger.debug("_exec_cmd: output for %s (rc = %d): %s",
+                 cmd_and_args, ret, out)
+
+    return ret, out
+
+
+def exec_cmd(cmd_and_args, as_shell=False):
     """
-    if os.path.exists(file_name):
-        return file_name
-
-    layers_canned_wks_dir = wic.engine.build_canned_image_list(scripts_path)
-    for canned_wks_dir in layers_canned_wks_dir:
-        for root, dirs, files in os.walk(canned_wks_dir):
-            for fname in files:
-                if fname == file_name:
-                    fullpath = os.path.join(canned_wks_dir, fname)
-                    return fullpath
-
-def get_custom_config(boot_file):
+    Execute command, return output
     """
-    Get the custom configuration to be used for the bootloader.
+    return _exec_cmd(cmd_and_args, as_shell)[1]
 
-    Return None if the file can't be found.
+
+def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""):
     """
-    scripts_path = os.path.abspath(os.path.dirname(__file__))
-    # Get the scripts path of poky
-    for x in range(0, 3):
-        scripts_path = os.path.dirname(scripts_path)
+    Execute native command, catching stderr, stdout
 
-    cfg_file = find_canned(scripts_path, boot_file)
-    if cfg_file:
-        with open(cfg_file, "r") as f:
-            config = f.read()
-        return config
+    Need to execute as_shell if the command uses wildcards
 
-    return None
+    Always need to execute native commands as_shell
+    """
+    # Take the last ';'-separated segment, as there may be leading "export" commands.
+    args = cmd_and_args.split(';')[-1].split()
+    logger.debug(args)
+
+    if pseudo:
+        cmd_and_args = pseudo + cmd_and_args
+
+    wtools_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+
+    native_paths = \
+            "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/sbin:%s/usr/sbin:%s/usr/bin" % \
+            (wtools_sysroot, wtools_sysroot, wtools_sysroot,
+             native_sysroot, native_sysroot, native_sysroot)
+    native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
+                           (native_paths, cmd_and_args)
+    logger.debug("exec_native_cmd: %s", native_cmd_and_args)
+
+    # If the command isn't in the native sysroot say we failed.
+    if spawn.find_executable(args[0], native_paths):
+        ret, out = _exec_cmd(native_cmd_and_args, True)
+    else:
+        ret = 127
+        out = "can't find native executable %s in %s" % (args[0], native_paths)
+
+    prog = args[0]
+    # shell command-not-found
+    if ret == 127 \
+       or (pseudo and ret == 1 and out == "Can't find '%s' in $PATH." % prog):
+        msg = "A native program %s required to build the image "\
+              "was not found (see details above).\n\n" % prog
+        recipe = NATIVE_RECIPES.get(prog)
+        if recipe:
+            msg += "Please make sure wic-tools have %s-native in its DEPENDS, bake it with 'bitbake wic-tools' "\
+                   "and try again.\n" % recipe
+        else:
+            msg += "Wic failed to find a recipe to build native %s. Please "\
+                   "file a bug against wic.\n" % prog
+        raise WicError(msg)
+
+    return ret, out
+
+BOOTDD_EXTRA_SPACE = 16384
+
+class BitbakeVars(defaultdict):
+    """
+    Container for Bitbake variables.
+    """
+    def __init__(self):
+        defaultdict.__init__(self, dict)
+
+        # default_image and vars_dir attributes should be set from outside
+        self.default_image = None
+        self.vars_dir = None
+
+    def _parse_line(self, line, image, matcher=re.compile(r"^([a-zA-Z0-9\-_+./~]+)=(.*)")):
+        """
+        Parse one line from bitbake -e output or from .env file.
+        Put result key-value pair into the storage.
+        """
+        if "=" not in line:
+            return
+        match = matcher.match(line)
+        if not match:
+            return
+        key, val = match.groups()
+        self[image][key] = val.strip('"')
+
+    def get_var(self, var, image=None, cache=True):
+        """
+        Get bitbake variable from 'bitbake -e' output or from .env file.
+        This is a lazy method, i.e. it runs bitbake or parses the file
+        only when a variable is requested. It also caches results.
+        """
+        if not image:
+            image = self.default_image
+
+        if image not in self:
+            if image and self.vars_dir:
+                fname = os.path.join(self.vars_dir, image + '.env')
+                if os.path.isfile(fname):
+                    # parse .env file
+                    with open(fname) as varsfile:
+                        for line in varsfile:
+                            self._parse_line(line, image)
+                else:
+                    print("Couldn't get bitbake variable from %s." % fname)
+                    print("File %s doesn't exist." % fname)
+                    return
+            else:
+                # Get bitbake -e output
+                cmd = "bitbake -e"
+                if image:
+                    cmd += " %s" % image
+
+                log_level = logger.getEffectiveLevel()
+                logger.setLevel(logging.INFO)
+                ret, lines = _exec_cmd(cmd)
+                logger.setLevel(log_level)
+
+                if ret:
+                    logger.error("Couldn't get '%s' output.", cmd)
+                    logger.error("Bitbake failed with error:\n%s\n", lines)
+                    return
+
+                # Parse bitbake -e output
+                for line in lines.split('\n'):
+                    self._parse_line(line, image)
+
+            # Make the first image the default set of variables
+            if cache:
+                images = [key for key in self if key]
+                if len(images) == 1:
+                    self[None] = self[image]
+
+        result = self[image].get(var)
+        if not cache:
+            self.pop(image, None)
+
+        return result
+
+# Create BB_VARS singleton
+BB_VARS = BitbakeVars()
+
+def get_bitbake_var(var, image=None, cache=True):
+    """
+    Provide old get_bitbake_var API by wrapping
+    get_var method of BB_VARS singleton.
+    """
+    return BB_VARS.get_var(var, image, cache)
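
BB_VARS is deliberately lazy: the first get_var() for an image either parses <image>.env from vars_dir or shells out to 'bitbake -e', keeps the parsed dict, and, when there is exactly one image, promotes it to the default (None) key; cache=False drops the per-image dict again after the lookup. A reduced model of those cache semantics, with a stub loader standing in for bitbake (variable value invented):

    from collections import defaultdict

    class VarsModel(defaultdict):
        """Toy stand-in for BitbakeVars; 'loader' replaces 'bitbake -e'."""
        def __init__(self, loader):
            defaultdict.__init__(self, dict)
            self.loader = loader

        def get_var(self, var, image=None, cache=True):
            if image not in self:
                self[image] = self.loader(image)  # parse once, then reuse
            result = self[image].get(var)
            if not cache:
                self.pop(image, None)  # forget, so the next call re-parses
            return result

    bb_vars = VarsModel(lambda image: {'IMAGE_ROOTFS': '/tmp/%s/rootfs' % image})
    print(bb_vars.get_var('IMAGE_ROOTFS', 'core-image-minimal'))
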
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/oe/__init__.py b/import-layers/yocto-poky/scripts/lib/wic/utils/oe/__init__.py
deleted file mode 100644
index 0a81575..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/oe/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-# OpenEmbedded wic utils library
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py b/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py
deleted file mode 100644
index fe188c9..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/oe/misc.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# Copyright (c) 2013, Intel Corporation.
-# All rights reserved.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# DESCRIPTION
-# This module provides a place to collect various wic-related utils
-# for the OpenEmbedded Image Tools.
-#
-# AUTHORS
-# Tom Zanussi <tom.zanussi (at] linux.intel.com>
-#
-"""Miscellaneous functions."""
-
-import os
-from collections import defaultdict
-from distutils import spawn
-
-from wic import msger
-from wic.utils import runner
-
-# executable -> recipe pairs for exec_native_cmd
-NATIVE_RECIPES = {"bmaptool": "bmap-tools",
-                  "mcopy": "mtools",
-                  "mkdosfs": "dosfstools",
-                  "mkfs.btrfs": "btrfs-tools",
-                  "mkfs.ext2": "e2fsprogs",
-                  "mkfs.ext3": "e2fsprogs",
-                  "mkfs.ext4": "e2fsprogs",
-                  "mkfs.vfat": "dosfstools",
-                  "mksquashfs": "squashfs-tools",
-                  "mkswap": "util-linux",
-                  "parted": "parted",
-                  "sfdisk": "util-linux",
-                  "sgdisk": "gptfdisk",
-                  "syslinux": "syslinux"
-                 }
-
-def _exec_cmd(cmd_and_args, as_shell=False, catch=3):
-    """
-    Execute command, catching stderr, stdout
-
-    Need to execute as_shell if the command uses wildcards
-    """
-    msger.debug("_exec_cmd: %s" % cmd_and_args)
-    args = cmd_and_args.split()
-    msger.debug(args)
-
-    if as_shell:
-        ret, out = runner.runtool(cmd_and_args, catch)
-    else:
-        ret, out = runner.runtool(args, catch)
-    out = out.strip()
-    msger.debug("_exec_cmd: output for %s (rc = %d): %s" % \
-                (cmd_and_args, ret, out))
-
-    return (ret, out)
-
-
-def exec_cmd(cmd_and_args, as_shell=False, catch=3):
-    """
-    Execute command, catching stderr, stdout
-
-    Exits if rc non-zero
-    """
-    ret, out = _exec_cmd(cmd_and_args, as_shell, catch)
-
-    if ret != 0:
-        msger.error("exec_cmd: %s returned '%s' instead of 0" % \
-                    (cmd_and_args, ret))
-
-    return out
-
-def exec_native_cmd(cmd_and_args, native_sysroot, catch=3, pseudo=""):
-    """
-    Execute native command, catching stderr, stdout
-
-    Need to execute as_shell if the command uses wildcards
-
-    Always need to execute native commands as_shell
-    """
-    # The reason for the -1 is that there may be "export" commands.
-    args = cmd_and_args.split(';')[-1].split()
-    msger.debug(args)
-
-    if pseudo:
-        cmd_and_args = pseudo + cmd_and_args
-    native_paths = \
-        "%s/sbin:%s/usr/sbin:%s/usr/bin" % \
-        (native_sysroot, native_sysroot, native_sysroot)
-    native_cmd_and_args = "export PATH=%s:$PATH;%s" % \
-                           (native_paths, cmd_and_args)
-    msger.debug("exec_native_cmd: %s" % cmd_and_args)
-
-    # If the command isn't in the native sysroot say we failed.
-    if spawn.find_executable(args[0], native_paths):
-        ret, out = _exec_cmd(native_cmd_and_args, True, catch)
-    else:
-        ret = 127
-
-    prog = args[0]
-    # shell command-not-found
-    if ret == 127 \
-       or (pseudo and ret == 1 and out == "Can't find '%s' in $PATH." % prog):
-        msg = "A native program %s required to build the image "\
-              "was not found (see details above).\n\n" % prog
-        recipe = NATIVE_RECIPES.get(prog)
-        if recipe:
-            msg += "Please bake it with 'bitbake %s-native' "\
-                   "and try again.\n" % recipe
-        else:
-            msg += "Wic failed to find a recipe to build native %s. Please "\
-                   "file a bug against wic.\n" % prog
-        msger.error(msg)
-    if out:
-        msger.debug('"%s" output: %s' % (args[0], out))
-
-    if ret != 0:
-        msger.error("exec_cmd: '%s' returned '%s' instead of 0" % \
-                    (cmd_and_args, ret))
-
-    return ret, out
-
-BOOTDD_EXTRA_SPACE = 16384
-
-class BitbakeVars(defaultdict):
-    """
-    Container for Bitbake variables.
-    """
-    def __init__(self):
-        defaultdict.__init__(self, dict)
-
-        # default_image and vars_dir attributes should be set from outside
-        self.default_image = None
-        self.vars_dir = None
-
-    def _parse_line(self, line, image):
-        """
-        Parse one line from bitbake -e output or from .env file.
-        Put result key-value pair into the storage.
-        """
-        if "=" not in line:
-            return
-        try:
-            key, val = line.split("=")
-        except ValueError:
-            return
-        key = key.strip()
-        val = val.strip()
-        if key.replace('_', '').isalnum():
-            self[image][key] = val.strip('"')
-
-    def get_var(self, var, image=None):
-        """
-        Get bitbake variable from 'bitbake -e' output or from .env file.
-        This is a lazy method, i.e. it runs bitbake or parses the file
-        only when a variable is requested. It also caches results.
-        """
-        if not image:
-            image = self.default_image
-
-        if image not in self:
-            if image and self.vars_dir:
-                fname = os.path.join(self.vars_dir, image + '.env')
-                if os.path.isfile(fname):
-                    # parse .env file
-                    with open(fname) as varsfile:
-                        for line in varsfile:
-                            self._parse_line(line, image)
-                else:
-                    print("Couldn't get bitbake variable from %s." % fname)
-                    print("File %s doesn't exist." % fname)
-                    return
-            else:
-                # Get bitbake -e output
-                cmd = "bitbake -e"
-                if image:
-                    cmd += " %s" % image
-
-                log_level = msger.get_loglevel()
-                msger.set_loglevel('normal')
-                ret, lines = _exec_cmd(cmd)
-                msger.set_loglevel(log_level)
-
-                if ret:
-                    print("Couldn't get '%s' output." % cmd)
-                    print("Bitbake failed with error:\n%s\n" % lines)
-                    return
-
-                # Parse bitbake -e output
-                for line in lines.split('\n'):
-                    self._parse_line(line, image)
-
-            # Make the first image the default set of variables
-            images = [key for key in self if key]
-            if len(images) == 1:
-                self[None] = self[image]
-
-        return self[image].get(var)
-
-# Create BB_VARS singleton
-BB_VARS = BitbakeVars()
-
-def get_bitbake_var(var, image=None):
-    """
-    Provide old get_bitbake_var API by wrapping
-    get_var method of BB_VARS singleton.
-    """
-    return BB_VARS.get_var(var, image)
-
-def parse_sourceparams(sourceparams):
-    """
-    Split sourceparams string of the form key1=val1[,key2=val2,...]
-    into a dict.  Also accepts valueless keys i.e. without =.
-
-    Returns dict of param key/val pairs (note that val may be None).
-    """
-    params_dict = {}
-
-    params = sourceparams.split(',')
-    if params:
-        for par in params:
-            if not par:
-                continue
-            if not '=' in par:
-                key = par
-                val = None
-            else:
-                key, val = par.split('=')
-            params_dict[key] = val
-
-    return params_dict
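
For reference, the removed parse_sourceparams helper (its duties live on elsewhere in the wic tree) turned a --sourceparams string into a dict, mapping valueless keys to None. A self-contained version, with split('=', 1) so values may themselves contain '=':

    def parse_sourceparams(sourceparams):
        """Split 'key1=val1,key2,...' into a dict; bare keys map to None."""
        params_dict = {}
        for par in sourceparams.split(','):
            if not par:
                continue
            if '=' not in par:
                params_dict[par] = None
            else:
                key, val = par.split('=', 1)
                params_dict[key] = val
        return params_dict

    print(parse_sourceparams('file=vmlinuz,initrd,loader=grub-efi'))
    # {'file': 'vmlinuz', 'initrd': None, 'loader': 'grub-efi'}
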
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py b/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py
deleted file mode 100644
index 9ea4a30..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/partitionedfs.py
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/usr/bin/env python -tt
-#
-# Copyright (c) 2009, 2010, 2011 Intel, Inc.
-# Copyright (c) 2007, 2008 Red Hat, Inc.
-# Copyright (c) 2008 Daniel P. Berrange
-# Copyright (c) 2008 David P. Huff
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
-from wic import msger
-from wic.utils.errors import ImageError
-from wic.utils.oe.misc import exec_cmd, exec_native_cmd
-from wic.filemap import sparse_copy
-
-# Overhead of the MBR partitioning scheme (just one sector)
-MBR_OVERHEAD = 1
-
-# Overhead of the GPT partitioning scheme
-GPT_OVERHEAD = 34
-
-# Size of a sector in bytes
-SECTOR_SIZE = 512
-
-class Image():
-    """
-    Generic base object for an image.
-
-    An Image is a container for a set of DiskImages and associated
-    partitions.
-    """
-    def __init__(self, native_sysroot=None):
-        self.disks = {}
-        self.partitions = []
-        self.partimages = []
-        # Size of a sector used in calculations
-        self.sector_size = SECTOR_SIZE
-        self._partitions_layed_out = False
-        self.native_sysroot = native_sysroot
-
-    def __add_disk(self, disk_name):
-        """ Add a disk 'disk_name' to the internal list of disks. Note,
-        'disk_name' is the name of the disk in the target system
-        (e.g., sdb). """
-
-        if disk_name in self.disks:
-            # We already have this disk
-            return
-
-        assert not self._partitions_layed_out
-
-        self.disks[disk_name] = \
-                {'disk': None,     # Disk object
-                 'numpart': 0,     # Number of allocated partitions
-                 'realpart': 0,    # Number of partitions in the partition table
-                 'partitions': [], # Indexes to self.partitions
-                 'offset': 0,      # Offset of next partition (in sectors)
-                 # Minimum required disk size to fit all partitions (in bytes)
-                 'min_size': 0,
-                 'ptable_format': "msdos", # Partition table format
-                 'identifier': None} # Disk system identifier
-
-    def add_disk(self, disk_name, disk_obj, identifier):
-        """ Add a disk object which have to be partitioned. More than one disk
-        can be added. In case of multiple disks, disk partitions have to be
-        added for each disk separately with 'add_partition()". """
-
-        self.__add_disk(disk_name)
-        self.disks[disk_name]['disk'] = disk_obj
-        self.disks[disk_name]['identifier'] = identifier
-
-    def __add_partition(self, part):
-        """ This is a helper function for 'add_partition()' which adds a
-        partition to the internal list of partitions. """
-
-        assert not self._partitions_layed_out
-
-        self.partitions.append(part)
-        self.__add_disk(part['disk_name'])
-
-    def add_partition(self, size, disk_name, mountpoint, source_file=None, fstype=None,
-                      label=None, fsopts=None, boot=False, align=None, no_table=False,
-                      part_type=None, uuid=None, system_id=None):
-        """ Add the next partition. Partitions have to be added in the
-        first-to-last order. """
-
-        ks_pnum = len(self.partitions)
-
-        # Converting kB to sectors for parted
-        size = size * 1024 // self.sector_size
-
-        part = {'ks_pnum': ks_pnum, # Partition number in the KS file
-                'size': size, # In sectors
-                'mountpoint': mountpoint, # Mount relative to chroot
-                'source_file': source_file, # partition contents
-                'fstype': fstype, # Filesystem type
-                'fsopts': fsopts, # Filesystem mount options
-                'label': label, # Partition label
-                'disk_name': disk_name, # physical disk name holding partition
-                'device': None, # kpartx device node for partition
-                'num': None, # Partition number
-                'boot': boot, # Bootable flag
-                'align': align, # Partition alignment
-                'no_table' : no_table, # Partition does not appear in partition table
-                'part_type' : part_type, # Partition type
-                'uuid': uuid, # Partition UUID
-                'system_id': system_id} # Partition system id
-
-        self.__add_partition(part)
-
-    def layout_partitions(self, ptable_format="msdos"):
-        """ Layout the partitions, meaning calculate the position of every
-        partition on the disk. The 'ptable_format' parameter defines the
-        partition table format and may be "msdos". """
-
-        msger.debug("Assigning %s partitions to disks" % ptable_format)
-
-        if self._partitions_layed_out:
-            return
-
-        self._partitions_layed_out = True
-
-        # Go through partitions in the order they are added in .ks file
-        for num in range(len(self.partitions)):
-            part = self.partitions[num]
-
-            if part['disk_name'] not in self.disks:
-                raise ImageError("No disk %s for partition %s" \
-                                 % (part['disk_name'], part['mountpoint']))
-
-            if ptable_format == 'msdos' and part['part_type']:
-                # The --part-type can also be implemented for MBR partitions,
-                # in which case it would map to the 1-byte "partition type"
-                # field at offset 3 of the partition entry.
-                raise ImageError("setting custom partition type is not " \
-                                 "implemented for msdos partitions")
-
-            # Get the disk where the partition is located
-            disk = self.disks[part['disk_name']]
-            disk['numpart'] += 1
-            if not part['no_table']:
-                disk['realpart'] += 1
-            disk['ptable_format'] = ptable_format
-
-            if disk['numpart'] == 1:
-                if ptable_format == "msdos":
-                    overhead = MBR_OVERHEAD
-                elif ptable_format == "gpt":
-                    overhead = GPT_OVERHEAD
-
-                # Skip the sectors required for the partitioning scheme overhead
-                disk['offset'] += overhead
-
-            if disk['realpart'] > 3:
-                # Reserve a sector for EBR for every logical partition
-                # before alignment is performed.
-                if ptable_format == "msdos":
-                    disk['offset'] += 1
-
-
-            if part['align']:
-                # If this is not the first partition and alignment is set,
-                # we need to align the partition.
-                # FIXME: This leaves empty space on the disk. To fill the
-                # gaps we could enlarge the previous partition?
-
-                # Calc how much the alignment is off.
-                align_sectors = disk['offset'] % (part['align'] * 1024 // self.sector_size)
-
-                if align_sectors:
-                    # If partition is not aligned as required, we need
-                    # to move forward to the next alignment point
-                    align_sectors = (part['align'] * 1024 // self.sector_size) - align_sectors
-
-                    msger.debug("Realignment for %s%s with %s sectors, original"
-                                " offset %s, target alignment is %sK." %
-                                (part['disk_name'], disk['numpart'], align_sectors,
-                                 disk['offset'], part['align']))
-
-                    # increase the offset so we actually start the partition on the right alignment
-                    disk['offset'] += align_sectors
-
-            part['start'] = disk['offset']
-            disk['offset'] += part['size']
-
-            part['type'] = 'primary'
-            if not part['no_table']:
-                part['num'] = disk['realpart']
-            else:
-                part['num'] = 0
-
-            if disk['ptable_format'] == "msdos":
-                if len(self.partitions) > 4:
-                    if disk['realpart'] > 3:
-                        part['type'] = 'logical'
-                        part['num'] = disk['realpart'] + 1
-
-            disk['partitions'].append(num)
-            msger.debug("Assigned %s to %s%d, sectors range %d-%d size %d "
-                        "sectors (%d bytes)." \
-                            % (part['mountpoint'], part['disk_name'], part['num'],
-                               part['start'], part['start'] + part['size'] - 1,
-                               part['size'], part['size'] * self.sector_size))
-
-        # Once all the partitions have been laid out, we can calculate the
-        # minimum disk sizes.
-        for disk in self.disks.values():
-            disk['min_size'] = disk['offset']
-            if disk['ptable_format'] == "gpt":
-                disk['min_size'] += GPT_OVERHEAD
-
-            disk['min_size'] *= self.sector_size
-
-    def __create_partition(self, device, parttype, fstype, start, size):
-        """ Create a partition on an image described by the 'device' object. """
-
-        # Start is included in the size, so we need to subtract one from the end.
-        end = start + size - 1
-        msger.debug("Added '%s' partition, sectors %d-%d, size %d sectors" %
-                    (parttype, start, end, size))
-
-        cmd = "parted -s %s unit s mkpart %s" % (device, parttype)
-        if fstype:
-            cmd += " %s" % fstype
-        cmd += " %d %d" % (start, end)
-
-        return exec_native_cmd(cmd, self.native_sysroot)
-
-    def __format_disks(self):
-        self.layout_partitions()
-
-        for dev in self.disks:
-            disk = self.disks[dev]
-            msger.debug("Initializing partition table for %s" % \
-                        (disk['disk'].device))
-            exec_native_cmd("parted -s %s mklabel %s" % \
-                            (disk['disk'].device, disk['ptable_format']),
-                            self.native_sysroot)
-
-            if disk['identifier']:
-                msger.debug("Set disk identifier %x" % disk['identifier'])
-                with open(disk['disk'].device, 'r+b') as img:
-                    img.seek(0x1B8)
-                    img.write(disk['identifier'].to_bytes(4, 'little'))
-
-        msger.debug("Creating partitions")
-
-        for part in self.partitions:
-            if part['num'] == 0:
-                continue
-
-            disk = self.disks[part['disk_name']]
-            if disk['ptable_format'] == "msdos" and part['num'] == 5:
-                # Create an extended partition (note: extended
-                # partition is described in MBR and contains all
-                # logical partitions). The logical partitions save a
-                # sector for an EBR just before the start of a
-                # partition. The extended partition must start one
-                # sector before the start of the first logical
-                # partition. This way the first EBR is inside of the
-                # extended partition. Since the extended partition
-                # starts a sector before the first logical partition,
-                # add a sector at the back, so that there is enough
-                # room for all logical partitions.
-                self.__create_partition(disk['disk'].device, "extended",
-                                        None, part['start'] - 1,
-                                        disk['offset'] - part['start'] + 1)
-
-            if part['fstype'] == "swap":
-                parted_fs_type = "linux-swap"
-            elif part['fstype'] == "vfat":
-                parted_fs_type = "fat32"
-            elif part['fstype'] == "msdos":
-                parted_fs_type = "fat16"
-            elif part['fstype'] == "ontrackdm6aux3":
-                parted_fs_type = "ontrackdm6aux3"
-            else:
-                # Type for ext2/ext3/ext4/btrfs
-                parted_fs_type = "ext2"
-
-            # The boot ROM of OMAP boards requires the vfat boot partition to
-            # have an even number of sectors.
-            if part['mountpoint'] == "/boot" and part['fstype'] in ["vfat", "msdos"] \
-               and part['size'] % 2:
-                msger.debug("Subtracting one sector from '%s' partition to " \
-                            "get even number of sectors for the partition" % \
-                            part['mountpoint'])
-                part['size'] -= 1
-
-            self.__create_partition(disk['disk'].device, part['type'],
-                                    parted_fs_type, part['start'], part['size'])
-
-            if part['part_type']:
-                msger.debug("partition %d: set type UID to %s" % \
-                            (part['num'], part['part_type']))
-                exec_native_cmd("sgdisk --typecode=%d:%s %s" % \
-                                         (part['num'], part['part_type'],
-                                          disk['disk'].device), self.native_sysroot)
-
-            if part['uuid'] and disk['ptable_format'] == "gpt":
-                msger.debug("partition %d: set UUID to %s" % \
-                            (part['num'], part['uuid']))
-                exec_native_cmd("sgdisk --partition-guid=%d:%s %s" % \
-                                (part['num'], part['uuid'], disk['disk'].device),
-                                self.native_sysroot)
-
-            if part['boot']:
-                flag_name = "legacy_boot" if disk['ptable_format'] == 'gpt' else "boot"
-                msger.debug("Set '%s' flag for partition '%s' on disk '%s'" % \
-                            (flag_name, part['num'], disk['disk'].device))
-                exec_native_cmd("parted -s %s set %d %s on" % \
-                                (disk['disk'].device, part['num'], flag_name),
-                                self.native_sysroot)
-            if part['system_id']:
-                exec_native_cmd("sfdisk --part-type %s %s %s" % \
-                                (disk['disk'].device, part['num'], part['system_id']),
-                                self.native_sysroot)
-
-            # Parted defaults to enabling the lba flag for fat16 partitions,
-            # which causes compatibility issues with some firmware (and really
-            # isn't necessary).
-            if parted_fs_type == "fat16":
-                if disk['ptable_format'] == 'msdos':
-                    msger.debug("Disable 'lba' flag for partition '%s' on disk '%s'" % \
-                                (part['num'], disk['disk'].device))
-                    exec_native_cmd("parted -s %s set %d lba off" % \
-                                    (disk['disk'].device, part['num']),
-                                    self.native_sysroot)
-
-    def cleanup(self):
-        if self.disks:
-            for dev in self.disks:
-                disk = self.disks[dev]
-                try:
-                    disk['disk'].cleanup()
-                except:
-                    pass
-        # remove partition images
-        for image in self.partimages:
-            if os.path.isfile(image):
-                os.remove(image)
-
-    def assemble(self, image_file):
-        msger.debug("Installing partitions")
-
-        for part in self.partitions:
-            source = part['source_file']
-            if source:
-                # install source_file contents into a partition
-                sparse_copy(source, image_file, part['start'] * self.sector_size)
-
-                msger.debug("Installed %s in partition %d, sectors %d-%d, "
-                            "size %d sectors" % \
-                            (source, part['num'], part['start'],
-                             part['start'] + part['size'] - 1, part['size']))
-
-                partimage = image_file + '.p%d' % part['num']
-                os.rename(source, partimage)
-                self.partimages.append(partimage)
-
-    def create(self):
-        for dev in self.disks:
-            disk = self.disks[dev]
-            disk['disk'].create()
-
-        self.__format_disks()
-
-        return
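
All of the removed layout code works in 512-byte sectors: kB sizes become size * 1024 // 512 sectors, the first partition is pushed past the table overhead (1 sector for msdos, 34 for GPT), and an --align request rounds the running offset up to the next multiple of align * 1024 // 512 sectors. A worked example of that alignment step:

    SECTOR_SIZE = 512
    MBR_OVERHEAD = 1   # sectors consumed by an msdos partition table

    def align_up(offset, align_kib):
        """Round 'offset' (in sectors) up to the next align_kib boundary."""
        align_sectors = align_kib * 1024 // SECTOR_SIZE
        slack = offset % align_sectors
        return offset + (align_sectors - slack) if slack else offset

    offset = MBR_OVERHEAD            # first partition starts after the MBR
    offset = align_up(offset, 4)     # 4 KiB alignment == 8 sectors
    print(offset)                    # 8: sector 1 rounded up to sector 8
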
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py b/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py
index db536ba..4aa00fb 100644
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py
+++ b/import-layers/yocto-poky/scripts/lib/wic/utils/runner.py
@@ -14,29 +14,17 @@
 # You should have received a copy of the GNU General Public License along
 # with this program; if not, write to the Free Software Foundation, Inc., 59
 # Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import os
 import subprocess
 
-from wic import msger
+from wic import WicError
 
-def runtool(cmdln_or_args, catch=1):
+def runtool(cmdln_or_args):
     """ wrapper for most of the subprocess calls
     input:
         cmdln_or_args: can be both args and cmdln str (shell=True)
-        catch: 0, quietly run
-               1, only STDOUT
-               2, only STDERR
-               3, both STDOUT and STDERR
     return:
-        (rc, output)
-        if catch==0: the output will always be None
+        rc, output
     """
-
-    if catch not in (0, 1, 2, 3):
-        # invalid catch selection, will cause exception, that's good
-        return None
-
     if isinstance(cmdln_or_args, list):
         cmd = cmdln_or_args[0]
         shell = False
@@ -45,66 +33,20 @@
         cmd = shlex.split(cmdln_or_args)[0]
         shell = True
 
-    if catch != 3:
-        dev_null = os.open("/dev/null", os.O_WRONLY)
-
-    if catch == 0:
-        sout = dev_null
-        serr = dev_null
-    elif catch == 1:
-        sout = subprocess.PIPE
-        serr = dev_null
-    elif catch == 2:
-        sout = dev_null
-        serr = subprocess.PIPE
-    elif catch == 3:
-        sout = subprocess.PIPE
-        serr = subprocess.STDOUT
+    sout = subprocess.PIPE
+    serr = subprocess.STDOUT
 
     try:
         process = subprocess.Popen(cmdln_or_args, stdout=sout,
                                    stderr=serr, shell=shell)
-        (sout, serr) = process.communicate()
+        sout, serr = process.communicate()
         # combine stdout and stderr, filter None out and decode
         out = ''.join([out.decode('utf-8') for out in [sout, serr] if out])
     except OSError as err:
         if err.errno == 2:
             # [Errno 2] No such file or directory
-            msger.error('Cannot run command: %s, lost dependency?' % cmd)
+            raise WicError('Cannot run command: %s, lost dependency?' % cmd)
         else:
             raise # relay
-    finally:
-        if catch != 3:
-            os.close(dev_null)
 
-    return (process.returncode, out)
-
-def show(cmdln_or_args):
-    # show all the message using msger.verbose
-
-    rcode, out = runtool(cmdln_or_args, catch=3)
-
-    if isinstance(cmdln_or_args, list):
-        cmd = ' '.join(cmdln_or_args)
-    else:
-        cmd = cmdln_or_args
-
-    msg = 'running command: "%s"' % cmd
-    if out:
-        out = out.strip()
-    if out:
-        msg += ', with output::'
-        msg += '\n  +----------------'
-        for line in out.splitlines():
-            msg += '\n  | %s' % line
-        msg += '\n  +----------------'
-
-    msger.verbose(msg)
-    return rcode
-
-def outs(cmdln_or_args, catch=1):
-    # get the outputs of tools
-    return runtool(cmdln_or_args, catch)[1].strip()
-
-def quiet(cmdln_or_args):
-    return runtool(cmdln_or_args, catch=0)[0]
+    return process.returncode, out
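
After this simplification runtool always merges stderr into stdout and raises WicError instead of going through the removed msger module. A standalone equivalent of the new control flow (WicError swapped for a plain exception so the sketch runs on its own):

    import subprocess

    def runtool(cmdln_or_args):
        """Run a command (arg list, or shell string when not a list)."""
        shell = not isinstance(cmdln_or_args, list)
        try:
            process = subprocess.Popen(cmdln_or_args, stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT, shell=shell)
            sout, _ = process.communicate()
        except OSError as err:
            if err.errno == 2:  # No such file or directory
                raise RuntimeError('Cannot run command: %s' % cmdln_or_args)
            raise
        return process.returncode, sout.decode('utf-8') if sout else ''

    print(runtool(['echo', 'hello']))   # (0, 'hello\n')
    print(runtool('echo done; false'))  # (1, 'done\n'): rc of the last command
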
diff --git a/import-layers/yocto-poky/scripts/lib/wic/utils/syslinux.py b/import-layers/yocto-poky/scripts/lib/wic/utils/syslinux.py
deleted file mode 100644
index aace286..0000000
--- a/import-layers/yocto-poky/scripts/lib/wic/utils/syslinux.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# ex:ts=4:sw=4:sts=4:et
-# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by the Free
-# Software Foundation; version 2 of the License
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
-# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc., 59
-# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-# AUTHOR
-# Adrian Freihofer <adrian.freihofer (at] neratec.com>
-
-
-import re
-from wic import msger
-
-
-def serial_console_form_kargs(kernel_args):
-    """
-    Create SERIAL... line from kernel parameters
-
-    syslinux needs a line SERIAL port [baudrate [flowcontrol]]
-    in the syslinux.cfg file. The config line is generated based
-    on kernel boot parameters. The the parameters of the first
-    ttyS console are considered for syslinux config.
-    @param kernel_args kernel command line
-    @return line for syslinux config file e.g. "SERIAL 0 115200"
-    """
-    syslinux_conf = ""
-    for param in kernel_args.split():
-        param_match = re.match("console=ttyS([0-9]+),?([0-9]*)([noe]?)([0-9]?)(r?)", param)
-        if param_match:
-            syslinux_conf += "SERIAL " + param_match.group(1)
-            # baudrate
-            if param_match.group(2):
-                syslinux_conf += " " + param_match.group(2)
-            # parity
-            if param_match.group(3) and param_match.group(3) != 'n':
-                msger.warning("syslinux does not support parity for console. {} is ignored."
-                              .format(param_match.group(3)))
-            # number of bits
-            if param_match.group(4) and param_match.group(4) != '8':
-                msger.warning("syslinux supports 8 bit console configuration only. {} is ignored."
-                              .format(param_match.group(4)))
-            # flow control
-            if param_match.group(5) and param_match.group(5) != '':
-                msger.warning("syslinux console flowcontrol configuration. {} is ignored."
-                              .format(param_match.group(5)))
-            break
-
-    return syslinux_conf
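
The removed helper matched console=ttyS<port>[,<baud><parity><bits>][r] arguments and emitted only the port and baudrate, warning about the parity, data-bit and flow-control fields syslinux cannot express. The core of the translation, reduced to the supported fields:

    import re

    def serial_line(kernel_args):
        """Derive a syslinux 'SERIAL port [baudrate]' line from kernel args."""
        for param in kernel_args.split():
            match = re.match(r"console=ttyS([0-9]+),?([0-9]*)", param)
            if match:
                line = "SERIAL " + match.group(1)
                if match.group(2):
                    line += " " + match.group(2)
                return line
        return ""

    print(serial_line("root=/dev/sda2 console=ttyS0,115200n8"))  # SERIAL 0 115200
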
diff --git a/import-layers/yocto-poky/scripts/multilib_header_wrapper.h b/import-layers/yocto-poky/scripts/multilib_header_wrapper.h
index 5a87540..f516673 100644
--- a/import-layers/yocto-poky/scripts/multilib_header_wrapper.h
+++ b/import-layers/yocto-poky/scripts/multilib_header_wrapper.h
@@ -21,11 +21,23 @@
  * 
  */
 
+
+#if defined (__arm__)
+#define __MHWORDSIZE			32
+#elif defined (__aarch64__) && defined (__LP64__)
+#define __MHWORDSIZE			64
+#elif defined (__aarch64__)
+#define __MHWORDSIZE			32
+#else
 #include <bits/wordsize.h>
+#if defined (__WORDSIZE)
+#define __MHWORDSIZE			__WORDSIZE
+#else
+#error "__WORDSIZE is not defined"
+#endif
+#endif
 
-#ifdef __WORDSIZE
-
-#if __WORDSIZE == 32
+#if __MHWORDSIZE == 32
 
 #ifdef _MIPS_SIM
 
@@ -41,15 +53,9 @@
 #include <ENTER_HEADER_FILENAME_HERE-32.h>
 #endif
 
-#elif __WORDSIZE == 64
+#elif __MHWORDSIZE == 64
 #include <ENTER_HEADER_FILENAME_HERE-64.h>
 #else
 #error "Unknown __WORDSIZE detected"
 #endif /* matches #if __WORDSIZE == 32 */
-
-#else /* __WORDSIZE is not defined */
-
-#error "__WORDSIZE is not defined"
-
-#endif
   
diff --git a/import-layers/yocto-poky/scripts/oe-build-perf-report b/import-layers/yocto-poky/scripts/oe-build-perf-report
new file mode 100755
index 0000000..6f0b84f
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-build-perf-report
@@ -0,0 +1,534 @@
+#!/usr/bin/python3
+#
+# Examine build performance test results
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import argparse
+import json
+import logging
+import os
+import re
+import sys
+from collections import namedtuple, OrderedDict
+from operator import attrgetter
+from xml.etree import ElementTree as ET
+
+# Import oe libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+import scriptpath
+from build_perf import print_table
+from build_perf.report import (metadata_xml_to_json, results_xml_to_json,
+                               aggregate_data, aggregate_metadata, measurement_stats)
+from build_perf import html
+
+scriptpath.add_oe_lib_path()
+
+from oeqa.utils.git import GitRepo
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger('oe-build-perf-report')
+
+
+# Container class for tester revisions
+TestedRev = namedtuple('TestedRev', 'commit commit_number tags')
+
+
+def get_test_runs(repo, tag_name, **kwargs):
+    """Get a sorted list of test runs, matching given pattern"""
+    # First, get field names from the tag name pattern
+    field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
+    undef_fields = [f for f in field_names if f not in kwargs.keys()]
+
+    # Fields for formatting tag name pattern
+    str_fields = dict([(f, '*') for f in field_names])
+    str_fields.update(kwargs)
+
+    # Get a list of all matching tags
+    tag_pattern = tag_name.format(**str_fields)
+    tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
+    log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)
+
+    # Parse undefined fields from tag names
+    str_fields = dict([(f, r'(?P<{}>[\w\-.()]+)'.format(f)) for f in field_names])
+    str_fields['branch'] = r'(?P<branch>[\w\-.()/]+)'
+    str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
+    str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
+    str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
+    # escape parentheses in fields in order not to mess up the regexp
+    fixed_fields = dict([(k, v.replace('(', r'\(').replace(')', r'\)')) for k, v in kwargs.items()])
+    str_fields.update(fixed_fields)
+    tag_re = re.compile(tag_name.format(**str_fields))
+
+    # Parse fields from tags
+    revs = []
+    for tag in tags:
+        m = tag_re.match(tag)
+        groups = m.groupdict()
+        revs.append([groups[f] for f in undef_fields] + [tag])
+
+    # Return field names and a sorted list of revs
+    return undef_fields, sorted(revs)
+
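
The pattern handling above is the subtle part of this new script: the --tag-name template is expanded twice, first with every {field} replaced by '*' to build a glob for git tag -l, then with each still-unknown field replaced by a named regexp group so the values can be read back out of the matching tag names. A reduced demonstration (the tag value is invented, and the real code uses stricter per-field patterns for commit and commit_number):

    import re

    tag_name = '{hostname}/{branch}/{commit_number}-g{commit}'
    fields = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]

    # Glob for 'git tag -l': */*/*-g*
    print(tag_name.format(**{f: '*' for f in fields}))

    # Regexp for parsing field values back out of a tag name
    groups = {f: r'(?P<{}>[\w\-.()]+)'.format(f) for f in fields}
    tag_re = re.compile(tag_name.format(**groups))
    print(tag_re.match('buildhost/master/1234-gdeadbeef').groupdict())
    # {'hostname': 'buildhost', 'branch': 'master',
    #  'commit_number': '1234', 'commit': 'deadbeef'}
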
+def list_test_revs(repo, tag_name, **kwargs):
+    """Get list of all tested revisions"""
+    fields, revs = get_test_runs(repo, tag_name, **kwargs)
+    ignore_fields = ['tag_number']
+    print_fields = [i for i, f in enumerate(fields) if f not in ignore_fields]
+
+    # Sort revs
+    rows = [[fields[i].upper() for i in print_fields] + ['TEST RUNS']]
+    prev = [''] * len(revs)
+    for rev in revs:
+        # Only use fields that we want to print
+        rev = [rev[i] for i in print_fields]
+
+        if rev != prev:
+            new_row = [''] * len(print_fields) + [1]
+            for i in print_fields:
+                if rev[i] != prev[i]:
+                    break
+            new_row[i:-1] = rev[i:]
+            rows.append(new_row)
+        else:
+            rows[-1][-1] += 1
+        prev = rev
+
+    print_table(rows)
+
+def get_test_revs(repo, tag_name, **kwargs):
+    """Get list of all tested revisions"""
+    fields, runs = get_test_runs(repo, tag_name, **kwargs)
+
+    revs = {}
+    commit_i = fields.index('commit')
+    commit_num_i = fields.index('commit_number')
+    for run in runs:
+        commit = run[commit_i]
+        commit_num = run[commit_num_i]
+        tag = run[-1]
+        if commit not in revs:
+            revs[commit] = TestedRev(commit, commit_num, [tag])
+        else:
+            assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
+            revs[commit].tags.append(tag)
+
+    # Return in sorted table
+    revs = sorted(revs.values(), key=attrgetter('commit_number'))
+    log.debug("Found %d tested revisions:\n    %s", len(revs),
+              "\n    ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
+    return revs
+
+def rev_find(revs, attr, val):
+    """Search from a list of TestedRev"""
+    for i, rev in enumerate(revs):
+        if getattr(rev, attr) == val:
+            return i
+    raise ValueError("Unable to find '{}' value '{}'".format(attr, val))
+
+def is_xml_format(repo, commit):
+    """Check if the commit contains xml (or json) data"""
+    if repo.rev_parse(commit + ':results.xml'):
+        log.debug("Detected report in xml format in %s", commit)
+        return True
+    else:
+        log.debug("No xml report in %s, assuming json formatted results", commit)
+        return False
+
+def read_results(repo, tags, xml=True):
+    """Read result files from repo"""
+
+    def parse_xml_stream(data):
+        """Parse multiple concatenated XML objects"""
+        objs = []
+        xml_d = ""
+        for line in data.splitlines():
+            if xml_d and line.startswith('<?xml version='):
+                objs.append(ET.fromstring(xml_d))
+                xml_d = line
+            else:
+                xml_d += line
+        objs.append(ET.fromstring(xml_d))
+        return objs
+
+    def parse_json_stream(data):
+        """Parse multiple concatenated JSON objects"""
+        objs = []
+        json_d = ""
+        for line in data.splitlines():
+            if line == '}{':
+                json_d += '}'
+                objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
+                json_d = '{'
+            else:
+                json_d += line
+        objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
+        return objs
+
+    num_revs = len(tags)
+
+    # Optimize by reading all data with one git command
+    log.debug("Loading raw result data from %d tags, %s...", num_revs, tags[0])
+    if xml:
+        git_objs = [tag + ':metadata.xml' for tag in tags] + [tag + ':results.xml' for tag in tags]
+        data = parse_xml_stream(repo.run_cmd(['show'] + git_objs + ['--']))
+        return ([metadata_xml_to_json(e) for e in data[0:num_revs]],
+                [results_xml_to_json(e) for e in data[num_revs:]])
+    else:
+        git_objs = [tag + ':metadata.json' for tag in tags] + [tag + ':results.json' for tag in tags]
+        data = parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--']))
+        return data[0:num_revs], data[num_revs:]
+
+
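
read_results fetches every per-tag blob with one 'git show' and then splits the concatenated output back into objects; for JSON this hinges on the '}{' line that appears where one pretty-printed object ends and the next begins. The parser in isolation:

    import json
    from collections import OrderedDict

    def parse_json_stream(data):
        """Split concatenated pretty-printed JSON objects on the '}{' seam."""
        objs, json_d = [], ""
        for line in data.splitlines():
            if line == '}{':
                objs.append(json.loads(json_d + '}', object_pairs_hook=OrderedDict))
                json_d = '{'
            else:
                json_d += line  # dropping newlines is fine, JSON ignores them
        objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
        return objs

    print(parse_json_stream('{\n "a": 1\n}{\n "b": 2\n}'))
    # [OrderedDict([('a', 1)]), OrderedDict([('b', 2)])]
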
+def get_data_item(data, key):
+    """Nested getitem lookup"""
+    for k in key.split('.'):
+        data = data[k]
+    return data
+
+
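
get_data_item gives the rest of the script dotted-path access into the nested metadata dicts, e.g. 'layers.meta.commit_count'. Usage on a hand-built dict:

    def get_data_item(data, key):
        """Nested getitem lookup: 'a.b.c' -> data['a']['b']['c']."""
        for k in key.split('.'):
            data = data[k]
        return data

    meta = {'layers': {'meta': {'commit_count': 42, 'branch': 'master'}}}
    print(get_data_item(meta, 'layers.meta.commit_count'))  # 42
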
+def metadata_diff(metadata_l, metadata_r):
+    """Prepare a metadata diff for printing"""
+    keys = [('Hostname', 'hostname', 'hostname'),
+            ('Branch', 'branch', 'layers.meta.branch'),
+            ('Commit number', 'commit_num', 'layers.meta.commit_count'),
+            ('Commit', 'commit', 'layers.meta.commit'),
+            ('Number of test runs', 'testrun_count', 'testrun_count')
+           ]
+
+    def _metadata_diff(key):
+        """Diff metadata from two test reports"""
+        try:
+            val1 = get_data_item(metadata_l, key)
+        except KeyError:
+            val1 = '(N/A)'
+        try:
+            val2 = get_data_item(metadata_r, key)
+        except KeyError:
+            val2 = '(N/A)'
+        return val1, val2
+
+    metadata = OrderedDict()
+    for title, key, key_json in keys:
+        value_l, value_r = _metadata_diff(key_json)
+        metadata[key] = {'title': title,
+                         'value_old': value_l,
+                         'value': value_r}
+    return metadata
+
+
+def print_diff_report(metadata_l, data_l, metadata_r, data_r):
+    """Print differences between two data sets"""
+
+    # First, print general metadata
+    print("\nTEST METADATA:\n==============")
+    meta_diff = metadata_diff(metadata_l, metadata_r)
+    rows = []
+    row_fmt = ['{:{wid}} ', '{:<{wid}}   ', '{:<{wid}}']
+    rows = [['', 'CURRENT COMMIT', 'COMPARING WITH']]
+    for key, val in meta_diff.items():
+        # Shorten commit hashes
+        if key == 'commit':
+            rows.append([val['title'] + ':', val['value'][:20], val['value_old'][:20]])
+        else:
+            rows.append([val['title'] + ':', val['value'], val['value_old']])
+    print_table(rows, row_fmt)
+
+
+    # Print test results
+    print("\nTEST RESULTS:\n=============")
+
+    tests = list(data_l['tests'].keys())
+    # Append tests that are only present in 'right' set
+    tests += [t for t in list(data_r['tests'].keys()) if t not in tests]
+
+    # Prepare data to be printed
+    rows = []
+    row_fmt = ['{:8}', '{:{wid}}', '{:{wid}}', '  {:>{wid}}', ' {:{wid}} ', '{:{wid}}',
+               '  {:>{wid}}', '  {:>{wid}}']
+    num_cols = len(row_fmt)
+    for test in tests:
+        test_l = data_l['tests'][test] if test in data_l['tests'] else None
+        test_r = data_r['tests'][test] if test in data_r['tests'] else None
+        pref = ' '
+        if test_l is None:
+            pref = '+'
+        elif test_r is None:
+            pref = '-'
+        descr = test_l['description'] if test_l else test_r['description']
+        heading = "{} {}: {}".format(pref, test, descr)
+
+        rows.append([heading])
+
+        # Generate the list of measurements
+        meas_l = test_l['measurements'] if test_l else {}
+        meas_r = test_r['measurements'] if test_r else {}
+        measurements = list(meas_l.keys())
+        measurements += [m for m in list(meas_r.keys()) if m not in measurements]
+
+        for meas in measurements:
+            m_pref = ' '
+            if meas in meas_l:
+                stats_l = measurement_stats(meas_l[meas], 'l.')
+            else:
+                stats_l = measurement_stats(None, 'l.')
+                m_pref = '+'
+            if meas in meas_r:
+                stats_r = measurement_stats(meas_r[meas], 'r.')
+            else:
+                stats_r = measurement_stats(None, 'r.')
+                m_pref = '-'
+            stats = stats_l.copy()
+            stats.update(stats_r)
+
+            absdiff = stats['val_cls'](stats['r.mean'] - stats['l.mean'])
+            reldiff = "{:+.1f} %".format(absdiff * 100 / stats['l.mean'])
+            if stats['r.mean'] > stats['l.mean']:
+                absdiff = '+' + str(absdiff)
+            else:
+                absdiff = str(absdiff)
+            rows.append(['', m_pref, stats['name'] + ' ' + stats['quantity'],
+                         str(stats['l.mean']), '->', str(stats['r.mean']),
+                         absdiff, reldiff])
+        rows.append([''] * num_cols)
+
+    print_table(rows, row_fmt)
+
+    print()
+
+
+def print_html_report(data, id_comp):
+    """Print report in html format"""
+    # Handle metadata
+    metadata = {'branch': {'title': 'Branch', 'value': 'master'},
+                'hostname': {'title': 'Hostname', 'value': 'foobar'},
+                'commit': {'title': 'Commit', 'value': '1234'}
+               }
+    metadata = metadata_diff(data[id_comp][0], data[-1][0])
+
+
+    # Generate list of tests
+    tests = []
+    for test in data[-1][1]['tests'].keys():
+        test_r = data[-1][1]['tests'][test]
+        new_test = {'name': test_r['name'],
+                    'description': test_r['description'],
+                    'status': test_r['status'],
+                    'measurements': [],
+                    'err_type': test_r.get('err_type'),
+                   }
+        # Limit length of err output shown
+        if 'message' in test_r:
+            lines = test_r['message'].splitlines()
+            if len(lines) > 20:
+                new_test['message'] = '...\n' + '\n'.join(lines[-20:])
+            else:
+                new_test['message'] = test_r['message']
+
+
+        # Generate the list of measurements
+        for meas in test_r['measurements'].keys():
+            meas_r = test_r['measurements'][meas]
+            meas_type = 'time' if meas_r['type'] == 'sysres' else 'size'
+            new_meas = {'name': meas_r['name'],
+                        'legend': meas_r['legend'],
+                        'description': meas_r['name'] + ' ' + meas_type,
+                       }
+            samples = []
+
+            # Run through all revisions in our data
+            for meta, test_data in data:
+                if (test not in test_data['tests'] or
+                        meas not in test_data['tests'][test]['measurements']):
+                    samples.append(measurement_stats(None))
+                    continue
+                test_i = test_data['tests'][test]
+                meas_i = test_i['measurements'][meas]
+                commit_num = get_data_item(meta, 'layers.meta.commit_count')
+                samples.append(measurement_stats(meas_i))
+                samples[-1]['commit_num'] = commit_num
+
+            absdiff = samples[-1]['val_cls'](samples[-1]['mean'] - samples[id_comp]['mean'])
+            new_meas['absdiff'] = absdiff
+            new_meas['absdiff_str'] = str(absdiff) if absdiff < 0 else '+' + str(absdiff)
+            new_meas['reldiff'] = "{:+.1f} %".format(absdiff * 100 / samples[id_comp]['mean'])
+            new_meas['samples'] = samples
+            new_meas['value'] = samples[-1]
+            new_meas['value_type'] = samples[-1]['val_cls']
+
+            new_test['measurements'].append(new_meas)
+        tests.append(new_test)
+
+    # Chart options
+    chart_opts = {'haxis': {'min': get_data_item(data[0][0], 'layers.meta.commit_count'),
+                            'max': get_data_item(data[-1][0], 'layers.meta.commit_count')}
+                 }
+
+    print(html.template.render(metadata=metadata, test_data=tests, chart_opts=chart_opts))
+
+
+def auto_args(repo, args):
+    """Guess arguments, if not defined by the user"""
+    # Get the latest commit in the repo
+    log.debug("Guessing arguments from the latest commit")
+    msg = repo.run_cmd(['log', '-1', '--branches', '--remotes', '--format=%b'])
+    for line in msg.splitlines():
+        split = line.split(':', 1)
+        if len(split) != 2:
+            continue
+
+        key = split[0]
+        val = split[1].strip()
+        if key == 'hostname':
+            log.debug("Using hostname %s", val)
+            args.hostname = val
+        elif key == 'branch':
+            log.debug("Using branch %s", val)
+            args.branch = val
+
+
+def parse_args(argv):
+    """Parse command line arguments"""
+    description = """
+Examine build performance test results from a Git repository"""
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+        description=description)
+
+    parser.add_argument('--debug', '-d', action='store_true',
+                        help="Verbose logging")
+    parser.add_argument('--repo', '-r', required=True,
+                        help="Results repository (local git clone)")
+    parser.add_argument('--list', '-l', action='store_true',
+                        help="List available test runs")
+    parser.add_argument('--html', action='store_true',
+                        help="Generate report in html format")
+    group = parser.add_argument_group('Tag and revision')
+    group.add_argument('--tag-name', '-t',
+                       default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
+                       help="Tag name (pattern) for finding results")
+    group.add_argument('--hostname', '-H')
+    group.add_argument('--branch', '-B', default='master')
+    group.add_argument('--machine', default='qemux86')
+    group.add_argument('--history-length', default=25, type=int,
+                       help="Number of tested revisions to plot in html report")
+    group.add_argument('--commit',
+                       help="Revision to search for")
+    group.add_argument('--commit-number',
+                       help="Revision number to search for, redundant if "
+                            "--commit is specified")
+    group.add_argument('--commit2',
+                       help="Revision to compare with")
+    group.add_argument('--commit-number2',
+                       help="Revision number to compare with, redundant if "
+                            "--commit2 is specified")
+
+    return parser.parse_args(argv)
+
+
+def main(argv=None):
+    """Script entry point"""
+    args = parse_args(argv)
+    if args.debug:
+        log.setLevel(logging.DEBUG)
+
+    repo = GitRepo(args.repo)
+
+    if args.list:
+        list_test_revs(repo, args.tag_name)
+        return 0
+
+    # Determine hostname which to use
+    if not args.hostname:
+        auto_args(repo, args)
+
+    revs = get_test_revs(repo, args.tag_name, hostname=args.hostname,
+                         branch=args.branch, machine=args.machine)
+    if len(revs) < 2:
+        log.error("%d tester revisions found, unable to generate report",
+                  len(revs))
+        return 1
+
+    # Pick revisions
+    if args.commit:
+        if args.commit_number:
+            log.warning("Ignoring --commit-number as --commit was specified")
+        index1 = rev_find(revs, 'commit', args.commit)
+    elif args.commit_number:
+        index1 = rev_find(revs, 'commit_number', args.commit_number)
+    else:
+        index1 = len(revs) - 1
+
+    if args.commit2:
+        if args.commit_number2:
+            log.warning("Ignoring --commit-number2 as --commit2 was specified")
+        index2 = rev_find(revs, 'commit', args.commit2)
+    elif args.commit_number2:
+        index2 = rev_find(revs, 'commit_number', args.commit_number2)
+    else:
+        if index1 > 0:
+            index2 = index1 - 1
+        else:
+            log.error("Unable to determine the other commit, use "
+                      "--commit2 or --commit-number2 to specify it")
+            return 1
+
+    index_l = min(index1, index2)
+    index_r = max(index1, index2)
+
+    rev_l = revs[index_l]
+    rev_r = revs[index_r]
+    log.debug("Using 'left' revision %s (%s), %s test runs:\n    %s",
+              rev_l.commit_number, rev_l.commit, len(rev_l.tags),
+              '\n    '.join(rev_l.tags))
+    log.debug("Using 'right' revision %s (%s), %s test runs:\n    %s",
+              rev_r.commit_number, rev_r.commit, len(rev_r.tags),
+              '\n    '.join(rev_r.tags))
+
+    # Check report format used in the repo (assume all reports in the same fmt)
+    xml = is_xml_format(repo, revs[index_r].tags[-1])
+
+    if args.html:
+        index_0 = max(0, index_r - args.history_length)
+        rev_range = range(index_0, index_r + 1)
+    else:
+        # We do not need range of commits for text report (no graphs)
+        index_0 = index_l
+        rev_range = (index_l, index_r)
+
+    # Read raw data
+    log.debug("Reading %d revisions, starting from %s (%s)",
+              len(rev_range), revs[index_0].commit_number, revs[index_0].commit)
+    raw_data = [read_results(repo, revs[i].tags, xml) for i in rev_range]
+
+    data = []
+    for raw_m, raw_d in raw_data:
+        data.append((aggregate_metadata(raw_m), aggregate_data(raw_d)))
+
+    # Re-map list indexes to the new table starting from index 0
+    index_r = index_r - index_0
+    index_l = index_l - index_0
+
+    # Print report
+    if not args.html:
+        print_diff_report(data[index_l][0], data[index_l][1],
+                          data[index_r][0], data[index_r][1])
+    else:
+        print_html_report(data, index_l)
+
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
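
Both report flavours reduce each measurement pair to the same two numbers: the absolute difference of the means (which the real code keeps in the measurement's own value type via val_cls) and a relative difference formatted as a signed percentage. A quick numeric check of the formula, with made-up means:

    def diff_pair(mean_l, mean_r):
        """absdiff and '{:+.1f} %' reldiff, as computed in both reports."""
        absdiff = mean_r - mean_l
        reldiff = "{:+.1f} %".format(absdiff * 100 / mean_l)
        return absdiff, reldiff

    print(diff_pair(120.0, 132.0))  # (12.0, '+10.0 %'): the build got 10% slower
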
diff --git a/import-layers/yocto-poky/scripts/oe-build-perf-test b/import-layers/yocto-poky/scripts/oe-build-perf-test
index 638e195..669470f 100755
--- a/import-layers/yocto-poky/scripts/oe-build-perf-test
+++ b/import-layers/yocto-poky/scripts/oe-build-perf-test
@@ -17,21 +17,23 @@
 import argparse
 import errno
 import fcntl
+import json
 import logging
 import os
+import re
 import shutil
 import sys
-import unittest
 from datetime import datetime
 
 sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
 import scriptpath
 scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
 import oeqa.buildperf
 from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
                             BuildPerfTestRunner, KernelDropCaches)
 from oeqa.utils.commands import runCmd
-from oeqa.utils.git import GitRepo, GitError
+from oeqa.utils.metadata import metadata_from_bb, write_metadata_file
 
 
 # Set-up logging
@@ -71,31 +73,6 @@
         return False
     return True
 
-def init_git_repo(path):
-    """Check/create Git repository where to store results"""
-    path = os.path.abspath(path)
-    if os.path.isfile(path):
-        log.error("Invalid Git repo %s: path exists but is not a directory", path)
-        return False
-    if not os.path.isdir(path):
-        try:
-            os.mkdir(path)
-        except (FileNotFoundError, PermissionError) as err:
-            log.error("Failed to mkdir %s: %s", path, err)
-            return False
-    if not os.listdir(path):
-        log.info("Initializing a new Git repo at %s", path)
-        GitRepo.init(path)
-    try:
-        GitRepo(path, is_topdir=True)
-    except GitError:
-        log.error("No Git repository but a non-empty directory found at %s.\n"
-                  "Please specify a Git repository, an empty directory or "
-                  "a non-existing directory", path)
-        return False
-    return True
-
-
 def setup_file_logging(log_file):
     """Setup loggin to file"""
     log_dir = os.path.dirname(log_file)
@@ -115,6 +92,38 @@
     shutil.copytree(src_dir, tgt_dir)
 
 
+def update_globalres_file(result_obj, filename, metadata):
+    """Write results to globalres csv file"""
+    # Map test names to time and size columns in globalres
+    # The tuples represent index and length of times and sizes
+    # respectively
+    gr_map = {'test1': ((0, 1), (8, 1)),
+              'test12': ((1, 1), (None, None)),
+              'test13': ((2, 1), (9, 1)),
+              'test2': ((3, 1), (None, None)),
+              'test3': ((4, 3), (None, None)),
+              'test4': ((7, 1), (10, 2))}
+
+    values = ['0'] * 12
+    for status, test, _ in result_obj.all_results():
+        if status in ['ERROR', 'SKIPPED']:
+            continue
+        (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
+        if t_ind is not None:
+            values[t_ind:t_ind + t_len] = test.times
+        if s_ind is not None:
+            values[s_ind:s_ind + s_len] = test.sizes
+
+    log.debug("Writing globalres log to %s", filename)
+    rev_info = metadata['layers']['meta']
+    with open(filename, 'a') as fobj:
+        fobj.write('{},{}:{},{},'.format(metadata['hostname'],
+                                         rev_info['branch'],
+                                         rev_info['commit'],
+                                         rev_info['commit']))
+        fobj.write(','.join(values) + '\n')
+
+
 def parse_args(argv):
     """Parse command line arguments"""
     parser = argparse.ArgumentParser(
@@ -131,20 +140,13 @@
     parser.add_argument('-o', '--out-dir', default='results-{date}',
                         type=os.path.abspath,
                         help="Output directory for test results")
+    parser.add_argument('-x', '--xml', action='store_true',
+                        help='Enable JUnit xml output')
     parser.add_argument('--log-file',
                         default='{out_dir}/oe-build-perf-test.log',
                         help="Log file of this script")
     parser.add_argument('--run-tests', nargs='+', metavar='TEST',
                         help="List of tests to run")
-    parser.add_argument('--commit-results', metavar='GIT_DIR',
-                        type=os.path.abspath,
-                        help="Commit result data to a (local) git repository")
-    parser.add_argument('--commit-results-branch', metavar='BRANCH',
-                        default="{git_branch}",
-                        help="Commit results to branch BRANCH.")
-    parser.add_argument('--commit-results-tag', metavar='TAG',
-                        default="{git_branch}/{git_commit_count}-g{git_commit}/{tag_num}",
-                        help="Tag results commit with TAG.")
 
     return parser.parse_args(argv)
 
@@ -167,9 +169,6 @@
 
     if not pre_run_sanity_check():
         return 1
-    if args.commit_results:
-        if not init_git_repo(args.commit_results):
-            return 1
 
     # Check our capability to drop caches and ask pass if needed
     KernelDropCaches.check()
@@ -181,7 +180,19 @@
     else:
         suite = loader.loadTestsFromModule(oeqa.buildperf)
 
+    # Save test metadata
+    metadata = metadata_from_bb()
+    log.info("Testing Git revision branch:commit %s:%s (%s)",
+             metadata['layers']['meta']['branch'],
+             metadata['layers']['meta']['commit'],
+             metadata['layers']['meta']['commit_count'])
+    if args.xml:
+        write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
+    else:
+        with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
+            json.dump(metadata, fobj, indent=2)
     archive_build_conf(out_dir)
+
     runner = BuildPerfTestRunner(out_dir, verbosity=2)
 
     # Suppress logger output to stderr so that the output from unittest
@@ -194,12 +205,13 @@
     # Restore logger output to stderr
     log.handlers[0].setLevel(log.level)
 
+    if args.xml:
+        result.write_results_xml()
+    else:
+        result.write_results_json()
+    result.write_buildstats_json()
     if args.globalres_file:
-        result.update_globalres_file(args.globalres_file)
-    if args.commit_results:
-        result.git_commit_results(args.commit_results,
-                                  args.commit_results_branch,
-                                  args.commit_results_tag)
+        update_globalres_file(result, args.globalres_file, metadata)
     if result.wasSuccessful():
         return 0
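
For illustration, the gr_map table in update_globalres_file() packs per-test times and sizes into a fixed 12-column CSV row by slice assignment. A self-contained Python 3 sketch; the test names and numbers here are invented, only the slicing mirrors the script:

    gr_map = {'testA': ((0, 1), (8, 1)),   # (time idx, len), (size idx, len)
              'testB': ((4, 3), (None, None))}
    results = {'testA': (['123.4'], ['2048']),        # (times, sizes)
               'testB': (['1.0', '2.0', '3.0'], [])}

    values = ['0'] * 12
    for name, (times, sizes) in results.items():
        (t_ind, t_len), (s_ind, s_len) = gr_map[name]
        if t_ind is not None:
            values[t_ind:t_ind + t_len] = times
        if s_ind is not None:
            values[s_ind:s_ind + s_len] = sizes

    print(','.join(values))
    # -> 123.4,0,0,0,1.0,2.0,3.0,0,2048,0,0,0
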
 
diff --git a/import-layers/yocto-poky/scripts/oe-buildenv-internal b/import-layers/yocto-poky/scripts/oe-buildenv-internal
index 9fae3b4..c890552 100755
--- a/import-layers/yocto-poky/scripts/oe-buildenv-internal
+++ b/import-layers/yocto-poky/scripts/oe-buildenv-internal
@@ -18,6 +18,18 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
+if ! $(return >/dev/null 2>&1) ; then
+    echo 'oe-buildenv-internal: error: this script must be sourced'
+    echo ''
+    echo 'Usage: . $OEROOT/scripts/oe-buildenv-internal &&'
+    echo ''
+    echo 'OpenEmbedded oe-buildenv-internal - an internal script that is'
+    echo 'used in oe-init-build-env and oe-init-build-env-memres to'
+    echo 'initialize the OE build environment'
+    echo ''
+    exit 2
+fi
+
 # It is assumed OEROOT is already defined when this is called
 if [ -z "$OEROOT" ]; then
     echo >&2 "Error: OEROOT is not defined!"
diff --git a/import-layers/yocto-poky/scripts/oe-find-native-sysroot b/import-layers/yocto-poky/scripts/oe-find-native-sysroot
index 81d62b8..235a67c 100755
--- a/import-layers/yocto-poky/scripts/oe-find-native-sysroot
+++ b/import-layers/yocto-poky/scripts/oe-find-native-sysroot
@@ -2,14 +2,14 @@
 #
 # Find a native sysroot to use - either from an in-tree OE build or
 # from a toolchain installation. It then ensures the variable
-# $OECORE_NATIVE_SYSROOT is set to the sysroot's base directory, and sets 
+# $OECORE_NATIVE_SYSROOT is set to the sysroot's base directory, and sets
 # $PSEUDO to the path of the pseudo binary.
 #
 # This script is intended to be run within other scripts by source'ing
 # it, e.g:
 #
 # SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot`
-# . $SYSROOT_SETUP_SCRIPT
+# . $SYSROOT_SETUP_SCRIPT <recipe>
 #
 # This script will terminate execution of your calling program unless
 # you set a variable $SKIP_STRICT_SYSROOT_CHECK to a non-empty string
@@ -30,6 +30,38 @@
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
+if [ "$1" = '--help' -o "$1" = '-h' -o $# -ne 1 ] ; then
+    echo 'Usage: oe-find-native-sysroot <recipe> [-h|--help]'
+    echo ''
+    echo 'OpenEmbedded find-native-sysroot - helper script to set'
+    echo 'environment variables OECORE_NATIVE_SYSROOT and PSEUDO'
+    echo 'to the path of the native sysroot directory and pseudo'
+    echo 'executable binary'
+    echo ''
+    echo 'options:'
+    echo '  recipe              recipe whose STAGING_DIR_NATIVE is used as the native sysroot'
+    echo '  -h, --help          show this help message and exit'
+    echo ''
+    exit 2
+fi
+
+# Global vars
+BITBAKE_E=""
+set_oe_native_sysroot(){
+    echo "Running bitbake -e $1"
+    BITBAKE_E="`bitbake -e $1`"
+    OECORE_NATIVE_SYSROOT=`echo "$BITBAKE_E" | grep ^STAGING_DIR_NATIVE | cut -d '"' -f2`
+
+    if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
+        # This indicates that there was an error running bitbake -e that
+        # the user needs to be informed of
+        echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
+        echo "Here is the output from bitbake -e $1"
+        echo "$BITBAKE_E"
+        exit 1
+    fi
+}
+
 if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
     BITBAKE=`which bitbake 2> /dev/null`
     if [ "x$BITBAKE" != "x" ]; then
@@ -40,10 +72,10 @@
                 exit 1
             fi
             touch conf/sanity.conf
-            OECORE_NATIVE_SYSROOT=`bitbake -e | grep ^STAGING_DIR_NATIVE | cut -d '"' -f2`
+            set_oe_native_sysroot $1
             rm -f conf/sanity.conf
         else
-            OECORE_NATIVE_SYSROOT=`bitbake -e | grep ^STAGING_DIR_NATIVE | cut -d '"' -f2`
+            set_oe_native_sysroot $1
         fi
     else
         echo "Error: Unable to locate bitbake command."
@@ -55,21 +87,15 @@
     fi
 fi
 
-if [ "x$OECORE_NATIVE_SYSROOT" = "x" ]; then
-    # This indicates that there was an error running bitbake -e that
-    # the user needs to be informed of
-    echo "There was an error running bitbake to determine STAGING_DIR_NATIVE"
-    echo "Here is the output from bitbake -e"
-    bitbake -e
-    exit 1
-fi
-
-# Set up pseudo command
-if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/pseudo" ]; then
-    echo "Error: Unable to find pseudo binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
+if [ ! -e "$OECORE_NATIVE_SYSROOT/" ]; then
+    echo "Error: $OECORE_NATIVE_SYSROOT doesn't exist."
 
     if [ "x$OECORE_DISTRO_VERSION" = "x" ]; then
-        echo "Have you run 'bitbake meta-ide-support'?"
+        if [[ $1 =~ .*native.* ]]; then
+            echo "Have you run 'bitbake $1 -caddto_recipe_sysroot'?"
+        else
+            echo "Have you run 'bitbake $1 '?"
+        fi
     else
         echo "This shouldn't happen - something is wrong with your toolchain installation"
     fi
@@ -78,4 +104,12 @@
         exit 1
     fi
 fi
-PSEUDO="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
+
+# Set up pseudo command
+pseudo="$OECORE_NATIVE_SYSROOT/usr/bin/pseudo"
+if [ -e "$pseudo" ]; then
+    echo "PSEUDO=$pseudo"
+    PSEUDO="$pseudo"
+else
+    echo "PSEUDO $pseudo is not found."
+fi
diff --git a/import-layers/yocto-poky/scripts/oe-git-archive b/import-layers/yocto-poky/scripts/oe-git-archive
new file mode 100755
index 0000000..ab19cb9
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-git-archive
@@ -0,0 +1,271 @@
+#!/usr/bin/python3
+#
+# Helper script for committing data to git and pushing upstream
+#
+# Copyright (c) 2017, Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+import argparse
+import glob
+import json
+import logging
+import math
+import os
+import re
+import sys
+from collections import namedtuple, OrderedDict
+from datetime import datetime, timedelta, tzinfo
+from operator import attrgetter
+
+# Import oe and bitbake libs
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+sys.path.append(os.path.join(scripts_path, 'lib'))
+import scriptpath
+scriptpath.add_bitbake_lib_path()
+scriptpath.add_oe_lib_path()
+
+from oeqa.utils.git import GitRepo, GitError
+from oeqa.utils.metadata import metadata_from_bb
+
+
+# Setup logging
+logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+log = logging.getLogger()
+
+
+class ArchiveError(Exception):
+    """Internal error handling of this script"""
+
+
+def format_str(string, fields):
+    """Format string using the given fields (dict)"""
+    try:
+        return string.format(**fields)
+    except KeyError as err:
+        raise ArchiveError("Unable to expand string '{}': unknown field {} "
+                           "(valid fields are: {})".format(
+                               string, err, ', '.join(sorted(fields.keys()))))
+
+
+def init_git_repo(path, no_create, bare):
+    """Initialize local Git repository"""
+    path = os.path.abspath(path)
+    if os.path.isfile(path):
+        raise ArchiveError("Invalid Git repo at {}: path exists but is not a "
+                           "directory".format(path))
+    if not os.path.isdir(path) or not os.listdir(path):
+        if no_create:
+            raise ArchiveError("No git repo at {}, refusing to create "
+                               "one".format(path))
+        if not os.path.isdir(path):
+            try:
+                os.mkdir(path)
+            except (FileNotFoundError, PermissionError) as err:
+                raise ArchiveError("Failed to mkdir {}: {}".format(path, err))
+        if not os.listdir(path):
+            log.info("Initializing a new Git repo at %s", path)
+            repo = GitRepo.init(path, bare)
+    try:
+        repo = GitRepo(path, is_topdir=True)
+    except GitError:
+        raise ArchiveError("Non-empty directory that is not a Git repository "
+                           "at {}\nPlease specify an existing Git repository, "
+                           "an empty directory or a non-existing directory "
+                           "path.".format(path))
+    return repo
+
+
+def git_commit_data(repo, data_dir, branch, message, exclude, notes):
+    """Commit data into a Git repository"""
+    log.info("Committing data into to branch %s", branch)
+    tmp_index = os.path.join(repo.git_dir, 'index.oe-git-archive')
+    try:
+        # Create new tree object from the data
+        env_update = {'GIT_INDEX_FILE': tmp_index,
+                      'GIT_WORK_TREE': os.path.abspath(data_dir)}
+        repo.run_cmd('add .', env_update)
+
+        # Remove files that are excluded
+        if exclude:
+            repo.run_cmd(['rm', '--cached'] + [f for f in exclude], env_update)
+
+        tree = repo.run_cmd('write-tree', env_update)
+
+        # Create new commit object from the tree
+        parent = repo.rev_parse(branch)
+        git_cmd = ['commit-tree', tree, '-m', message]
+        if parent:
+            git_cmd += ['-p', parent]
+        commit = repo.run_cmd(git_cmd, env_update)
+
+        # Create git notes
+        for ref, filename in notes:
+            ref = ref.format(branch_name=branch)
+            repo.run_cmd(['notes', '--ref', ref, 'add',
+                          '-F', os.path.abspath(filename), commit])
+
+        # Update branch head
+        git_cmd = ['update-ref', 'refs/heads/' + branch, commit]
+        if parent:
+            git_cmd.append(parent)
+        repo.run_cmd(git_cmd)
+
+        # Update current HEAD, if we're on branch 'branch'
+        if not repo.bare and repo.get_current_branch() == branch:
+            log.info("Updating %s HEAD to latest commit", repo.top_dir)
+            repo.run_cmd('reset --hard')
+
+        return commit
+    finally:
+        if os.path.exists(tmp_index):
+            os.unlink(tmp_index)
+
+
+def expand_tag_strings(repo, name_pattern, msg_subj_pattern, msg_body_pattern,
+                       keywords):
+    """Generate tag name and message, with support for running id number"""
+    keyws = keywords.copy()
+    # Tag number is handled specially: if not defined, we autoincrement it
+    if 'tag_number' not in keyws:
+        # Fill in all other fields than 'tag_number'
+        keyws['tag_number'] = '{tag_number}'
+        tag_re = format_str(name_pattern, keyws)
+        # Replace parentheses for proper regex matching
+        tag_re = tag_re.replace('(', r'\(').replace(')', r'\)') + '$'
+        # Inject regex group pattern for 'tag_number'
+        tag_re = tag_re.format(tag_number='(?P<tag_number>[0-9]{1,5})')
+
+        keyws['tag_number'] = 0
+        for existing_tag in repo.run_cmd('tag').splitlines():
+            match = re.match(tag_re, existing_tag)
+
+            if match and int(match.group('tag_number')) >= keyws['tag_number']:
+                keyws['tag_number'] = int(match.group('tag_number')) + 1
+
+    tag_name = format_str(name_pattern, keyws)
+    msg_subj = format_str(msg_subj_pattern.strip(), keyws)
+    msg_body = format_str(msg_body_pattern, keyws)
+    return tag_name, msg_subj + '\n\n' + msg_body
+
+
+def parse_args(argv):
+    """Parse command line arguments"""
+    parser = argparse.ArgumentParser(
+            description="Commit data to git and push upstream",
+            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+
+    parser.add_argument('--debug', '-D', action='store_true',
+                        help="Verbose logging")
+    parser.add_argument('--git-dir', '-g', required=True,
+                        help="Local git directory to use")
+    parser.add_argument('--no-create', action='store_true',
+                        help="If GIT_DIR is not a valid Git repository, do not "
+                             "try to create one")
+    parser.add_argument('--bare', action='store_true',
+                        help="Initialize a bare repository when creating a "
+                             "new one")
+    parser.add_argument('--push', '-p', nargs='?', default=False, const=True,
+                        help="Push to remote")
+    parser.add_argument('--branch-name', '-b',
+                        default='{hostname}/{branch}/{machine}',
+                        help="Git branch name (pattern) to use")
+    parser.add_argument('--no-tag', action='store_true',
+                        help="Do not create Git tag")
+    parser.add_argument('--tag-name', '-t',
+                        default='{hostname}/{branch}/{machine}/{commit_count}-g{commit}/{tag_number}',
+                        help="Tag name (pattern) to use")
+    parser.add_argument('--commit-msg-subject',
+                        default='Results of {branch}:{commit} on {hostname}',
+                        help="Subject line (pattern) to use in the commit message")
+    parser.add_argument('--commit-msg-body',
+                        default='branch: {branch}\ncommit: {commit}\nhostname: {hostname}',
+                        help="Commit message body (pattern)")
+    parser.add_argument('--tag-msg-subject',
+                        default='Test run #{tag_number} of {branch}:{commit} on {hostname}',
+                        help="Subject line (pattern) of the tag message")
+    parser.add_argument('--tag-msg-body',
+                        default='',
+                        help="Tag message body (pattern)")
+    parser.add_argument('--exclude', action='append', default=[],
+                        help="Glob to exclude files from the commit. Relative "
+                             "to DATA_DIR. May be specified multiple times")
+    parser.add_argument('--notes', nargs=2, action='append', default=[],
+                        metavar=('GIT_REF', 'FILE'),
+                        help="Add a file as a note under refs/notes/GIT_REF. "
+                             "{branch_name} in GIT_REF will be expanded to the "
+                             "actual target branch name (specified by "
+                             "--branch-name). This option may be specified "
+                             "multiple times.")
+    parser.add_argument('data_dir', metavar='DATA_DIR',
+                        help="Data to commit")
+    return parser.parse_args(argv)
+
+
+def main(argv=None):
+    """Script entry point"""
+    args = parse_args(argv)
+    if args.debug:
+        log.setLevel(logging.DEBUG)
+
+    try:
+        if not os.path.isdir(args.data_dir):
+            raise ArchiveError("Not a directory: {}".format(args.data_dir))
+
+        data_repo = init_git_repo(args.git_dir, args.no_create, args.bare)
+
+        # Get keywords to be used in tag and branch names and messages
+        metadata = metadata_from_bb()
+        keywords = {'hostname': metadata['hostname'],
+                    'branch': metadata['layers']['meta']['branch'],
+                    'commit': metadata['layers']['meta']['commit'],
+                    'commit_count': metadata['layers']['meta']['commit_count'],
+                    'machine': metadata['config']['MACHINE']}
+
+        # Expand strings early in order to avoid getting into inconsistent
+        # state (e.g. no tag even if data was committed)
+        commit_msg = format_str(args.commit_msg_subject.strip(), keywords)
+        commit_msg += '\n\n' + format_str(args.commit_msg_body, keywords)
+        branch_name = format_str(args.branch_name, keywords)
+        tag_name = None
+        if not args.no_tag and args.tag_name:
+            tag_name, tag_msg = expand_tag_strings(data_repo, args.tag_name,
+                                                   args.tag_msg_subject,
+                                                   args.tag_msg_body, keywords)
+
+        # Commit data
+        commit = git_commit_data(data_repo, args.data_dir, branch_name,
+                                 commit_msg, args.exclude, args.notes)
+
+        # Create tag
+        if tag_name:
+            log.info("Creating tag %s", tag_name)
+            data_repo.run_cmd(['tag', '-a', '-m', tag_msg, tag_name, commit])
+
+        # Push data to remote
+        if args.push:
+            cmd = ['push', '--tags']
+            # If no remote is given we push with the default settings from
+            # gitconfig
+            if args.push is not True:
+                notes_refs = ['refs/notes/' + ref.format(branch_name=branch_name)
+                                for ref, _ in args.notes]
+                cmd.extend([args.push, branch_name] + notes_refs)
+            log.info("Pushing data to remote")
+            data_repo.run_cmd(cmd)
+
+    except ArchiveError as err:
+        log.error(str(err))
+        return 1
+
+    return 0
+
+if __name__ == "__main__":
+    sys.exit(main())
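
For illustration, the tag_number auto-increment in expand_tag_strings() turns the tag-name pattern into a regex, scans the existing tags, and picks highest+1. A standalone Python 3 sketch of the same idea; it uses re.escape() instead of the script's manual parenthesis escaping, and the pattern and tag names are invented:

    import re

    name_pattern = '{branch}/{commit}/{tag_number}'
    keyws = {'branch': 'master', 'commit': 'abc123'}

    # Fill in all fields except tag_number, then make it a capture group
    keyws['tag_number'] = '{tag_number}'
    tag_re = re.escape(name_pattern.format(**keyws)).replace(
        re.escape('{tag_number}'), r'(?P<tag_number>[0-9]{1,5})') + '$'

    existing = ['master/abc123/0', 'master/abc123/1', 'other/xyz/7']
    keyws['tag_number'] = 0
    for tag in existing:
        match = re.match(tag_re, tag)
        if match and int(match.group('tag_number')) >= keyws['tag_number']:
            keyws['tag_number'] = int(match.group('tag_number')) + 1

    print(name_pattern.format(**keyws))  # -> master/abc123/2
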
diff --git a/import-layers/yocto-poky/scripts/oe-git-proxy b/import-layers/yocto-poky/scripts/oe-git-proxy
index 0078e95..7a43fe6 100755
--- a/import-layers/yocto-poky/scripts/oe-git-proxy
+++ b/import-layers/yocto-poky/scripts/oe-git-proxy
@@ -18,6 +18,27 @@
 # AUTHORS
 # Darren Hart <dvhart@linux.intel.com>
 
+if [ $# -lt 2 -o "$1" = '--help' -o "$1" = '-h' ] ; then
+    echo 'oe-git-proxy: error: the following arguments are required: host port'
+    echo 'Usage: oe-git-proxy host port'
+    echo ''
+    echo 'OpenEmbedded git-proxy - a simple tool to be used via GIT_PROXY_COMMAND.'
+    echo 'It uses socat to make SOCKS or HTTPS proxy connections.'
+    echo 'It uses ALL_PROXY to determine the proxy server, protocol, and port.'
+    echo 'It uses NO_PROXY to skip using the proxy for a comma delimited list'
+    echo 'of hosts, host globs (*.example.com), IPs, or CIDR masks (192.168.1.0/24).'
+    echo 'It is known to work with both bash and dash shells.'
+    echo ''
+    echo 'arguments:'
+    echo '  host                proxy host to use'
+    echo '  port                proxy port to use'
+    echo ''
+    echo 'options:'
+    echo '  -h, --help          show this help message and exit'
+    echo ''
+    exit 2
+fi
+
 # Locate the netcat binary
 SOCAT=$(which socat 2>/dev/null)
 if [ $? -ne 0 ]; then
diff --git a/import-layers/yocto-poky/scripts/oe-pkgdata-util b/import-layers/yocto-poky/scripts/oe-pkgdata-util
index dbffd6a..677effe 100755
--- a/import-layers/yocto-poky/scripts/oe-pkgdata-util
+++ b/import-layers/yocto-poky/scripts/oe-pkgdata-util
@@ -174,15 +174,16 @@
             logger.error("No packages specified")
             sys.exit(1)
 
-    def readvar(pkgdata_file, valuename):
+    def readvar(pkgdata_file, valuename, mappedpkg):
         val = ""
         with open(pkgdata_file, 'r') as f:
             for line in f:
-                if line.startswith(valuename + ":"):
+                if (line.startswith(valuename + ":") or
+                    line.startswith(valuename + "_" + mappedpkg + ":")):
                     val = line.split(': ', 1)[1].rstrip()
         return val
 
-    logger.debug("read-value('%s', '%s' '%s'" % (args.pkgdata_dir, args.valuename, packages))
+    logger.debug("read-value('%s', '%s' '%s')" % (args.pkgdata_dir, args.valuename, packages))
     for package in packages:
         pkg_split = package.split('_')
         pkg_name = pkg_split[0]
@@ -192,18 +193,17 @@
         if os.path.exists(revlink):
             mappedpkg = os.path.basename(os.readlink(revlink))
             qvar = args.valuename
+            value = readvar(revlink, qvar, mappedpkg)
             if qvar == "PKGSIZE":
-                # append packagename
-                qvar = "%s_%s" % (args.valuename, mappedpkg)
                 # PKGSIZE is now in bytes, but we want it in KB
-                pkgsize = (int(readvar(revlink, qvar)) + 1024 // 2) // 1024
+                pkgsize = (int(value) + 1024 // 2) // 1024
                 value = "%d" % pkgsize
-            else:
-                value = readvar(revlink, qvar)
             if args.prefix_name:
                 print('%s %s' % (pkg_name, value))
             else:
                 print(value)
+        else:
+            logger.debug("revlink %s does not exist", revlink)
 
 def lookup_pkglist(pkgs, pkgdata_dir, reverse):
     if reverse:
@@ -325,8 +325,15 @@
             recipe_version = recipe_version + "-" + mappings[pkg]['PR']
         pkg_size = mappings[pkg]['PKGSIZE']
 
-        items.append("%s %s %s %s %s" %
-                     (pkg, pkg_version, recipe, recipe_version, pkg_size))
+        line = "%s %s %s %s %s" % (pkg, pkg_version, recipe, recipe_version, pkg_size)
+
+        if args.extra:
+            for var in args.extra:
+                val = mappings[pkg][var].strip()
+                val = re.sub(r'\s+', ' ', val)
+                line += ' "%s"' % val
+
+        items.append(line)
     print('\n'.join(items))
 
 def get_recipe_pkgs(pkgdata_dir, recipe, unpackaged):
@@ -530,6 +537,7 @@
                                           description='Looks up the specified runtime package(s) and display information')
     parser_package_info.add_argument('pkg', nargs='*', help='Runtime package name to look up')
     parser_package_info.add_argument('-f', '--file', help='Read package names from the specified file (one per line, first field only)')
+    parser_package_info.add_argument('-e', '--extra', help='Extra variables to display, e.g., LICENSE (can be specified multiple times)', action='append')
     parser_package_info.set_defaults(func=package_info)
 
     parser_find_path = subparsers.add_parser('find-path',
@@ -570,7 +578,7 @@
         logger.debug('Found bitbake path: %s' % bitbakepath)
         tinfoil = tinfoil_init()
         try:
-            args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR', True)
+            args.pkgdata_dir = tinfoil.config_data.getVar('PKGDATA_DIR')
         finally:
             tinfoil.shutdown()
         logger.debug('Value of PKGDATA_DIR is "%s"' % args.pkgdata_dir)
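
For illustration, the readvar() change above accepts a value stored either bare ("PKGSIZE:") or suffixed with the mapped package name ("PKGSIZE_<pkg>:"), and PKGSIZE is then rounded from bytes to the nearest KB. A self-contained Python 3 sketch; the pkgdata contents are made up and the function reads from a file-like object rather than a pkgdata path:

    import io

    def readvar(fobj, valuename, mappedpkg):
        val = ""
        for line in fobj:
            if (line.startswith(valuename + ":") or
                    line.startswith(valuename + "_" + mappedpkg + ":")):
                val = line.split(': ', 1)[1].rstrip()
        return val

    pkgdata = io.StringIO("PN: foo\nPKGSIZE_libfoo1: 5120\n")
    value = readvar(pkgdata, "PKGSIZE", "libfoo1")

    # Same rounding as in the script: bytes to KB, rounded to the nearest KB
    pkgsize = (int(value) + 1024 // 2) // 1024
    print(pkgsize)  # -> 5
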
diff --git a/import-layers/yocto-poky/scripts/oe-publish-sdk b/import-layers/yocto-poky/scripts/oe-publish-sdk
index 4fe8974..9f7963c 100755
--- a/import-layers/yocto-poky/scripts/oe-publish-sdk
+++ b/import-layers/yocto-poky/scripts/oe-publish-sdk
@@ -114,9 +114,9 @@
 
     # Setting up the git repo
     if not is_remote:
-        cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; mv .git/hooks/post-update.sample .git/hooks/post-update; echo "*.pyc\n*.pyo" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true; git update-server-info' % (destination, destination)
+        cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; mv .git/hooks/post-update.sample .git/hooks/post-update; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true; git update-server-info' % (destination, destination)
     else:
-        cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; mv .git/hooks/post-update.sample .git/hooks/post-update; echo '*.pyc\n*.pyo' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true; git update-server-info'" % (host, destdir, destdir)
+        cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; mv .git/hooks/post-update.sample .git/hooks/post-update; echo '*.pyc\n*.pyo\npyshtables.py' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true; git update-server-info'" % (host, destdir, destdir)
     ret = subprocess.call(cmd, shell=True)
     if ret == 0:
         logger.info('SDK published successfully')
diff --git a/import-layers/yocto-poky/scripts/oe-run-native b/import-layers/yocto-poky/scripts/oe-run-native
index 496e34f..1131122 100755
--- a/import-layers/yocto-poky/scripts/oe-run-native
+++ b/import-layers/yocto-poky/scripts/oe-run-native
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Copyright (c) 2016,  Intel Corporation.
 # All Rights Reserved
@@ -22,27 +22,47 @@
 #
 
 if [ $# -lt 1 -o "$1" = '--help' -o "$1" = '-h' ] ; then
-    echo "Usage: $0 <native tool> [parameters]"
+    echo 'oe-run-native: the following arguments are required: <native recipe> <native tool>'
+    echo 'Usage: oe-run-native native-recipe tool [parameters]'
+    echo ''
+    echo 'OpenEmbedded run-native - runs native tools'
+    echo ''
+    echo 'arguments:'
+    echo '  native-recipe       The recipe which provides the tool'
+    echo '  tool                Native tool to run'
+    echo ''
+    exit 2
+fi
+
+native_recipe="$1"
+tool="$2"
+
+if [ "${native_recipe%-native}" = "$native_recipe" ]; then
+    echo Error: $native_recipe is not a native recipe
+    echo Error: Use \"oe-run-native -h\" for help
     exit 1
 fi
 
+shift
+
 SYSROOT_SETUP_SCRIPT=`which oe-find-native-sysroot 2> /dev/null`
 if [ -z "$SYSROOT_SETUP_SCRIPT" ]; then
         echo "Error: Unable to find oe-find-native-sysroot script"
         exit 1
 fi
-. $SYSROOT_SETUP_SCRIPT
+. $SYSROOT_SETUP_SCRIPT $native_recipe
 
-OLDPATH=$PATH
+OLD_PATH=$PATH
 
 # look for a tool only in native sysroot
 PATH=$OECORE_NATIVE_SYSROOT/usr/bin:$OECORE_NATIVE_SYSROOT/bin:$OECORE_NATIVE_SYSROOT/usr/sbin:$OECORE_NATIVE_SYSROOT/sbin
-tool=`/usr/bin/which $1 2>/dev/null`
+tool_find=`/usr/bin/which $tool 2>/dev/null`
 
-if [ -n "$tool" ] ; then
+if [ -n "$tool_find" ] ; then
     # add old path to allow usage of host tools
     PATH=$PATH:$OLD_PATH $@
 else
-    echo "Error: Unable to find '$1' in native sysroot"
+    echo "Error: Unable to find '$tool' in $PATH"
+    echo "Error: Have you run 'bitbake $native_recipe -caddto_recipe_sysroot'?"
     exit 1
 fi
diff --git a/import-layers/yocto-poky/scripts/oe-selftest b/import-layers/yocto-poky/scripts/oe-selftest
index d9ffd40..52366b1 100755
--- a/import-layers/yocto-poky/scripts/oe-selftest
+++ b/import-layers/yocto-poky/scripts/oe-selftest
@@ -46,6 +46,7 @@
 import oeqa.selftest
 import oeqa.utils.ftools as ftools
 from oeqa.utils.commands import runCmd, get_bb_var, get_test_layer
+from oeqa.utils.metadata import metadata_from_bb, write_metadata_file
 from oeqa.selftest.base import oeSelfTest, get_available_machines
 
 try:
@@ -61,7 +62,8 @@
 
 def logger_create():
     log_file = log_prefix + ".log"
-    if os.path.exists("oe-selftest.log"): os.remove("oe-selftest.log")
+    if os.path.lexists("oe-selftest.log"):
+        os.remove("oe-selftest.log")
     os.symlink(log_file, "oe-selftest.log")
 
     log = logging.getLogger("selftest")
@@ -85,7 +87,7 @@
 log = logger_create()
 
 def get_args_parser():
-    description = "Script that runs unit tests agains bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
+    description = "Script that runs unit tests against bitbake and other Yocto related tools. The goal is to validate tools functionality and metadata integrity. Refer to https://wiki.yoctoproject.org/wiki/Oe-selftest for more information."
     parser = argparse_oe.ArgumentParser(description=description)
     group = parser.add_mutually_exclusive_group(required=True)
     group.add_argument('-r', '--run-tests', required=False, action='store', nargs='*', dest="run_tests", default=None, help='Select what tests to run (modules, classes or test methods). Format should be: <module>.<class>.<test_method>')
@@ -106,11 +108,17 @@
                        help='List all tags that have been set to test cases.')
     parser.add_argument('--machine', required=False, dest='machine', choices=['random', 'all'], default=None,
                         help='Run tests on different machines (random/all).')
+    parser.add_argument('--repository', required=False, dest='repository', default='', action='store',
+                        help='Submit test results to a repository')
     return parser
 
+builddir = None
+
 
 def preflight_check():
 
+    global builddir
+
     log.info("Checking that everything is in order before running the tests")
 
     if not os.environ.get("BUILDDIR"):
@@ -123,7 +131,27 @@
         os.chdir(builddir)
 
     if not "meta-selftest" in get_bb_var("BBLAYERS"):
-        log.error("You don't seem to have the meta-selftest layer in BBLAYERS")
+        log.warn("meta-selftest layer not found in BBLAYERS, adding it")
+        meta_selftestdir = os.path.join(
+                get_bb_var("BBLAYERS_FETCH_DIR"),
+                'meta-selftest')
+        if os.path.isdir(meta_selftestdir):
+            runCmd("bitbake-layers add-layer %s" %meta_selftestdir)
+        else:
+            log.error("could not locate meta-selftest in:\n%s"
+                    %meta_selftestdir)
+            return False
+
+    if "buildhistory.bbclass" in get_bb_var("BBINCLUDED"):
+        log.error("You have buildhistory enabled already and this isn't recommended for selftest, please disable it first.")
+        return False
+
+    if get_bb_var("PRSERV_HOST"):
+        log.error("Please unset PRSERV_HOST in order to run oe-selftest")
+        return False
+
+    if get_bb_var("SANITY_TESTED_DISTROS"):
+        log.error("Please unset SANITY_TESTED_DISTROS in order to run oe-selftest")
         return False
 
     log.info("Running bitbake -p")
@@ -132,7 +160,7 @@
     return True
 
 def add_include():
-    builddir = os.environ.get("BUILDDIR")
+    global builddir
     if "#include added by oe-selftest.py" \
         not in ftools.read_file(os.path.join(builddir, "conf/local.conf")):
             log.info("Adding: \"include selftest.inc\" in local.conf")
@@ -146,7 +174,7 @@
                     "\n#include added by oe-selftest.py\ninclude bblayers.inc")
 
 def remove_include():
-    builddir = os.environ.get("BUILDDIR")
+    global builddir
     if builddir is None:
         return
     if "#include added by oe-selftest.py" \
@@ -162,18 +190,21 @@
                     "\n#include added by oe-selftest.py\ninclude bblayers.inc")
 
 def remove_inc_files():
+    global builddir
+    if builddir is None:
+        return
     try:
-        os.remove(os.path.join(os.environ.get("BUILDDIR"), "conf/selftest.inc"))
+        os.remove(os.path.join(builddir, "conf/selftest.inc"))
         for root, _, files in os.walk(get_test_layer()):
             for f in files:
                 if f == 'test_recipe.inc':
                     os.remove(os.path.join(root, f))
-    except (AttributeError, OSError,) as e:    # AttributeError may happen if BUILDDIR is not set
+    except OSError as e:
         pass
 
     for incl_file in ['conf/bblayers.inc', 'conf/machine.inc']:
         try:
-            os.remove(os.path.join(os.environ.get("BUILDDIR"), incl_file))
+            os.remove(os.path.join(builddir, incl_file))
         except:
             pass
 
@@ -336,10 +367,15 @@
     # Get a testsuite based on 'keyword'
     # criteria: name, class, module, id, tag
     # keyword: a list of tests, classes, modules, ids, tags
-
-    ts = sorted([ (tc.tcid, tc.tctag, tc.tcname, tc.tcclass, tc.tcmodule) for tc in get_testsuite_by(criteria, keyword) ])
-
-    print('%-4s\t%-20s\t%-60s\t%-25s\t%-20s' % ('id', 'tag', 'name', 'class', 'module'))
+    def tc_key(t):
+        if t[0] is None:
+            return (0,) + t[1:]
+        return t
+    # tcid may be None if no ID was assigned, in which case sorted() will throw
+    # a TypeError as Python 3 does not allow comparison (<,<=,>=,>) of
+    # heterogeneous types, handle this by using a custom key generator
+    ts = sorted([ (tc.tcid, tc.tctag, tc.tcname, tc.tcclass, tc.tcmodule) \
+                  for tc in get_testsuite_by(criteria, keyword) ], key=tc_key)
     print('_' * 150)
     for t in ts:
         if isinstance(t[1], (tuple, list)):
@@ -386,7 +422,7 @@
     """ Set up the coverage measurement for the testcases to be run """
     import datetime
     import subprocess
-    builddir = os.environ.get("BUILDDIR")
+    global builddir
     pokydir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
     curcommit= subprocess.check_output(["git", "--git-dir", os.path.join(pokydir, ".git"), "rev-parse", "HEAD"]).decode('utf-8')
     coveragerc = "%s/.coveragerc" % builddir
@@ -463,6 +499,9 @@
     sys.path.extend(layer_libdirs)
     imp.reload(oeqa.selftest)
 
+    # act like bitbake and enforce en_US.UTF-8 locale
+    os.environ["LC_ALL"] = "en_US.UTF-8"
+
     if args.run_tests_by and len(args.run_tests_by) >= 2:
         valid_options = ['name', 'class', 'module', 'id', 'tag']
         if args.run_tests_by[0] not in valid_options:
@@ -564,6 +603,76 @@
 
         log.info("Finished")
 
+        if args.repository:
+            import git
+            # Commit tests results to repository
+            metadata = metadata_from_bb()
+            git_dir = os.path.join(os.getcwd(), 'selftest')
+            if not os.path.isdir(git_dir):
+                os.mkdir(git_dir)
+
+            log.debug('Checking for git repository in %s' % git_dir)
+            try:
+                repo = git.Repo(git_dir)
+            except git.exc.InvalidGitRepositoryError:
+                log.debug("Couldn't find git repository %s; "
+                         "cloning from %s" % (git_dir, args.repository))
+                repo = git.Repo.clone_from(args.repository, git_dir)
+
+            r_branches = repo.git.branch(r=True)
+            r_branches = set(r_branches.replace('origin/', '').split())
+            l_branches = {str(branch) for branch in repo.branches}
+            branch = '%s/%s/%s' % (metadata['hostname'],
+                                   metadata['layers']['meta'].get('branch', '(nogit)'),
+                                   metadata['config']['MACHINE'])
+
+            if branch in l_branches:
+                log.debug('Found branch in local repository, checking out')
+                repo.git.checkout(branch)
+            elif branch in r_branches:
+                log.debug('Found branch in remote repository, checking'
+                          ' out and pulling')
+                repo.git.checkout(branch)
+                repo.git.pull()
+            else:
+                log.debug('New branch %s' % branch)
+                repo.git.checkout('master')
+                repo.git.checkout(b=branch)
+
+            cleanResultsDir(repo)
+            xml_dir = os.path.join(os.getcwd(), log_prefix)
+            copyResultFiles(xml_dir, git_dir, repo)
+            metadata_file = os.path.join(git_dir, 'metadata.xml')
+            write_metadata_file(metadata_file, metadata)
+            repo.index.add([metadata_file])
+            repo.index.write()
+
+            # Get information for commit message
+            layer_info = ''
+            for layer, values in metadata['layers'].items():
+                layer_info = '%s%-17s = %s:%s\n' % (layer_info, layer,
+                              values.get('branch', '(nogit)'), values.get('commit', '0'*40))
+            msg = 'Selftest for build %s of %s for machine %s on %s\n\n%s' % (
+                   log_prefix[12:], metadata['distro']['pretty_name'],
+                   metadata['config']['MACHINE'], metadata['hostname'], layer_info)
+
+            log.debug('Committing results to local repository')
+            repo.index.commit(msg)
+            if not repo.is_dirty():
+                try:
+                    if branch in r_branches:
+                        log.debug('Pushing changes to remote repository')
+                        repo.git.push()
+                    else:
+                        log.debug('Pushing changes to remote repository '
+                                  'creating new branch')
+                        repo.git.push('-u', 'origin', branch)
+                except git.exc.GitCommandError:
+                    log.error('Failed to push to remote repository')
+                    return 1
+            else:
+                log.error('Local repository is dirty, not pushing commits')
+
         if result.wasSuccessful():
             return 0
         else:
@@ -647,6 +756,35 @@
 
     return StampedResult
 
+def cleanResultsDir(repo):
+    """ Remove result files from directory """
+
+    xml_files = []
+    directory = repo.working_tree_dir
+    for f in os.listdir(directory):
+        path = os.path.join(directory, f)
+        if os.path.isfile(path) and path.endswith('.xml'):
+            xml_files.append(f)
+    repo.index.remove(xml_files, working_tree=True)
+
+def copyResultFiles(src, dst, repo):
+    """ Copy result files from src to dst removing the time stamp. """
+
+    import shutil
+
+    re_time = re.compile("-[0-9]+")
+    file_list = []
+
+    for root, subdirs, files in os.walk(src):
+        tmp_dir = root.replace(src, '').lstrip('/')
+        for s in subdirs:
+            os.mkdir(os.path.join(dst, tmp_dir, s))
+        for f in files:
+            file_name = os.path.join(dst, tmp_dir, re_time.sub("", f))
+            shutil.copy2(os.path.join(root, f), file_name)
+            file_list.append(file_name)
+    repo.index.add(file_list)
+
 class TestRunner(_TestRunner):
     """Test runner class aware of exporting tests."""
     def __init__(self, *args, **kwargs):
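
For illustration, the tc_key helper added above exists because Python 3 refuses to order None against int, so a plain sorted() over (tcid, ...) tuples raises TypeError as soon as one test case has no ID. A minimal Python 3 sketch with invented tuples:

    def tc_key(t):
        if t[0] is None:
            return (0,) + t[1:]
        return t

    cases = [(10, 'tag', 'a'), (None, 'tag', 'b'), (2, 'tag', 'c')]
    print(sorted(cases, key=tc_key))
    # -> [(None, 'tag', 'b'), (2, 'tag', 'c'), (10, 'tag', 'a')]
    # sorted(cases) without the key would raise TypeError here
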
diff --git a/import-layers/yocto-poky/scripts/oe-setup-builddir b/import-layers/yocto-poky/scripts/oe-setup-builddir
index e53f73c..ef49551 100755
--- a/import-layers/yocto-poky/scripts/oe-setup-builddir
+++ b/import-layers/yocto-poky/scripts/oe-setup-builddir
@@ -23,6 +23,14 @@
     exit 1
 fi
 
+if [ "$1" = '--help' -o "$1" = '-h' ]; then
+    echo 'Usage: oe-setup-builddir'
+    echo ''
+    echo "OpenEmbedded setup-builddir - setup build directory $BUILDDIR"
+    echo ''
+    exit 2
+fi
+
 mkdir -p "$BUILDDIR/conf"
 
 if [ ! -d "$BUILDDIR" ]; then
diff --git a/import-layers/yocto-poky/scripts/oe-setup-rpmrepo b/import-layers/yocto-poky/scripts/oe-setup-rpmrepo
index 917b98b..df1c614 100755
--- a/import-layers/yocto-poky/scripts/oe-setup-rpmrepo
+++ b/import-layers/yocto-poky/scripts/oe-setup-rpmrepo
@@ -23,16 +23,6 @@
 # Instead, use OE_TMPDIR for passing this in externally.
 TMPDIR="$OE_TMPDIR"
 
-function usage() {
-	echo "Usage: $0 <rpm-dir>"
-	echo "  <rpm-dir>: default is $TMPDIR/deploy/rpm"
-}
-
-if [ $# -gt 1 ]; then
-	usage
-	exit 1
-fi
-
 setup_tmpdir() {
     if [ -z "$TMPDIR" ]; then
         # Try to get TMPDIR from bitbake
@@ -53,6 +43,23 @@
     fi
 }
 
+setup_tmpdir
+
+function usage() {
+    echo 'Usage: oe-setup-rpmrepo rpm-dir'
+    echo ''
+    echo 'OpenEmbedded setup-rpmrepo - setup rpm repository'
+    echo ''
+    echo 'arguments:'
+    echo "  rpm-dir               rpm repo directory, default is $TMPDIR/deploy/rpm"
+    echo ''
+}
+
+if [ $# -gt 1 -o "$1" = '--help' -o "$1" = '-h' ]; then
+    usage
+    exit 2
+fi
+
 setup_sysroot() {
 	# Toolchain installs set up $OECORE_NATIVE_SYSROOT in their
 	# environment script. If that variable isn't set, we're
@@ -68,7 +75,6 @@
 	fi 
 }
 
-setup_tmpdir
 setup_sysroot
 
 
@@ -83,7 +89,7 @@
 	exit 1
 fi
 
-CREATEREPO=$OECORE_NATIVE_SYSROOT/usr/bin/createrepo
+CREATEREPO=$OECORE_NATIVE_SYSROOT/usr/bin/createrepo_c
 if [ ! -e "$CREATEREPO" ]; then
    	echo "Error: can't find createrepo binary"
 	echo "please run bitbake createrepo-native first"
diff --git a/import-layers/yocto-poky/scripts/oe-test b/import-layers/yocto-poky/scripts/oe-test
new file mode 100755
index 0000000..a1d282d
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/oe-test
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+
+# OpenEmbedded test tool
+#
+# Copyright (C) 2016 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import argparse
+import importlib
+import logging
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import argparse_oe
+import scriptutils
+
+# oe-test is used for testexport and it doesn't have oe lib
+# so we just skip adding these libraries (not used in testexport)
+try:
+    import scriptpath
+    scriptpath.add_oe_lib_path()
+except ImportError:
+    pass
+
+from oeqa.core.context import OETestContextExecutor
+
+logger = scriptutils.logger_create('oe-test')
+
+def _load_test_components(logger):
+    components = {}
+
+    for path in sys.path:
+        base_dir = os.path.join(path, 'oeqa')
+        if os.path.exists(base_dir) and os.path.isdir(base_dir):
+            for file in os.listdir(base_dir):
+                comp_name = file
+                comp_context = os.path.join(base_dir, file, 'context.py')
+                if os.path.exists(comp_context):
+                    comp_plugin = importlib.import_module('oeqa.%s.%s' % \
+                            (comp_name, 'context'))
+                    try:
+                        if not issubclass(comp_plugin._executor_class,
+                                OETestContextExecutor):
+                            raise TypeError("Component %s in %s, _executor_class "\
+                                "isn't derived from OETestContextExecutor."\
+                                % (comp_name, comp_context))
+
+                        components[comp_name] = comp_plugin._executor_class()
+                    except AttributeError:
+                        raise AttributeError("Component %s in %s doesn't have "\
+                                "_executor_class defined." % (comp_name, comp_context))
+
+    return components
+
+def main():
+    parser = argparse_oe.ArgumentParser(description="OpenEmbedded test tool",
+                                        add_help=False,
+                                        epilog="Use %(prog)s <subcommand> --help to get help on a specific command")
+    parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
+    parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+    global_args, unparsed_args = parser.parse_known_args()
+
+    # Help is added here rather than via add_help=True, as we don't want it to
+    # be handled by parse_known_args()
+    parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
+                        help='show this help message and exit')
+
+    if global_args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif global_args.quiet:
+        logger.setLevel(logging.ERROR)
+
+    components = _load_test_components(logger)
+
+    subparsers = parser.add_subparsers(dest="subparser_name", title='subcommands', metavar='<subcommand>')
+    subparsers.add_subparser_group('components', 'Test components')
+    subparsers.required = True
+    for comp_name in sorted(components.keys()):
+        comp = components[comp_name]
+        comp.register_commands(logger, subparsers)
+
+    try:
+        args = parser.parse_args(unparsed_args, namespace=global_args)
+        results = args.func(logger, args)
+        ret = 0 if results.wasSuccessful() else 1
+    except SystemExit as err:
+        if err.code != 0:
+            raise err
+        ret = err.code
+    except argparse_oe.ArgumentUsageError as ae:
+        parser.error_subcommand(ae.message, ae.subcommand)
+
+    return ret
+
+if __name__ == '__main__':
+    try:
+        ret = main()
+    except Exception:
+        ret = 1
+        import traceback
+        traceback.print_exc()
+    sys.exit(ret)
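
For illustration, the component discovery in _load_test_components() boils down to: for every oeqa/<name>/context.py reachable via sys.path, import oeqa.<name>.context and instantiate its _executor_class. A simplified Python 3 sketch of that loop, with the error handling trimmed and getattr() standing in for the script's try/except AttributeError:

    import importlib
    import os
    import sys

    def find_components():
        components = {}
        for path in sys.path:
            base_dir = os.path.join(path, 'oeqa')
            if not os.path.isdir(base_dir):
                continue
            for name in os.listdir(base_dir):
                if os.path.exists(os.path.join(base_dir, name, 'context.py')):
                    module = importlib.import_module('oeqa.%s.context' % name)
                    executor = getattr(module, '_executor_class', None)
                    if executor is not None:
                        components[name] = executor()
        return components

    print(sorted(find_components()))  # e.g. ['runtime', 'sdk', 'selftest']
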
diff --git a/import-layers/yocto-poky/scripts/oe-trim-schemas b/import-layers/yocto-poky/scripts/oe-trim-schemas
index 66a1b8d..7c199ef 100755
--- a/import-layers/yocto-poky/scripts/oe-trim-schemas
+++ b/import-layers/yocto-poky/scripts/oe-trim-schemas
@@ -18,6 +18,15 @@
         l = [e for e in l if e.tag == name]
     return l
 
+if len(sys.argv) < 2 or sys.argv[1] in ('-h', '--help'):
+    print('oe-trim-schemas: error: the following arguments are required: schema\n'
+          'Usage: oe-trim-schemas schema\n\n'
+          'OpenEmbedded trim schemas - remove unneeded schema locale translations\n'
+          '                            from gconf schema files\n\n'
+          'arguments:\n'
+          '  schema                gconf schema file to trim\n')
+    sys.exit(2)
+
 xml = etree.parse(sys.argv[1])
 
 for schema in child(xml.getroot(), "schemalist").getchildren():
diff --git a/import-layers/yocto-poky/scripts/oepydevshell-internal.py b/import-layers/yocto-poky/scripts/oepydevshell-internal.py
index a22bec3..04621ae 100755
--- a/import-layers/yocto-poky/scripts/oepydevshell-internal.py
+++ b/import-layers/yocto-poky/scripts/oepydevshell-internal.py
@@ -22,9 +22,16 @@
     old[3] = old[3] &~ termios.ECHO &~ termios.ICANON
     termios.tcsetattr(fd, termios.TCSADRAIN, old)
 
-if len(sys.argv) != 3:
-    print("Incorrect parameters")
-    sys.exit(1)
+if len(sys.argv) != 3 or sys.argv[1] in ('-h', '--help'):
+    print('oepydevshell-internal.py: error: the following arguments are required: pty, pid\n'
+          'Usage: oepydevshell-internal.py pty pid\n\n'
+          'OpenEmbedded oepydevshell-internal.py - internal script called from meta/classes/devshell.bbclass\n\n'
+          'arguments:\n'
+          '  pty                   pty device name\n'
+          '  pid                   parent process id\n\n'
+          'options:\n'
+          '  -h, --help            show this help message and exit\n')
+    sys.exit(2)
 
 pty = open(sys.argv[1], "w+b", 0)
 parent = int(sys.argv[2])
@@ -38,7 +45,7 @@
 try:
     readline.read_history_file(histfile)
 except IOError:
-    pass 
+    pass
 
 try:
 
diff --git a/import-layers/yocto-poky/scripts/postinst-intercepts/update_gio_module_cache b/import-layers/yocto-poky/scripts/postinst-intercepts/update_gio_module_cache
index fe46809..fc3f9d0 100644
--- a/import-layers/yocto-poky/scripts/postinst-intercepts/update_gio_module_cache
+++ b/import-layers/yocto-poky/scripts/postinst-intercepts/update_gio_module_cache
@@ -3,5 +3,7 @@
 set -e
 
 PSEUDO_UNLOAD=1 qemuwrapper -L $D -E LD_LIBRARY_PATH=$D${libdir}:$D${base_libdir} \
-        $D${libexecdir}/${binprefix}gio-querymodules $D${libdir}/gio/modules/
+	$D${libexecdir}/${binprefix}gio-querymodules $D${libdir}/gio/modules/
 
+[ ! -e $D${libdir}/gio/modules/giomodule.cache ] ||
+	chown root:root $D${libdir}/gio/modules/giomodule.cache
diff --git a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/draw.py b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/draw.py
index 8c574be..201ce45 100644
--- a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/draw.py
+++ b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/draw.py
@@ -133,6 +133,16 @@
 # Package Write RPM/DEB/IPK task color
 TASK_COLOR_PACKAGE_WRITE = (0.0, 0.50, 0.50, 1.0)
 
+# Distinct colors used for different disk volumes.
+# If we have more volumes, colors get re-used.
+VOLUME_COLORS = [
+	(1.0, 1.0, 0.00, 1.0),
+	(0.0, 1.00, 0.00, 1.0),
+	(1.0, 0.00, 1.00, 1.0),
+	(0.0, 0.00, 1.00, 1.0),
+	(0.0, 1.00, 1.00, 1.0),
+]
+
 # Process states
 STATE_UNDEFINED = 0
 STATE_RUNNING   = 1
@@ -256,7 +266,7 @@
 	# avoid divide by zero
 	if max_y == 0:
 		max_y = 1.0
-	xscale = float (chart_bounds[2]) / max_x
+	xscale = float (chart_bounds[2]) / (max_x - x_shift)
 	# If data_range is given, scale the chart so that the value range in
 	# data_range matches the chart bounds exactly.
 	# Otherwise, scale so that the actual data matches the chart bounds.
@@ -321,6 +331,16 @@
 	w = int ((end - start) * sec_w_base * xscale) + 2 * off_x
 	h = proc_h * processes + header_h + 2 * off_y
 
+	if options.charts:
+		if trace.cpu_stats:
+			h += 30 + bar_h
+		if trace.disk_stats:
+			h += 30 + bar_h
+		if trace.monitor_disk:
+			h += 30 + bar_h
+		if trace.mem_stats:
+			h += meminfo_bar_h
+
 	return (w, h)
 
 def clip_visible(clip, rect):
@@ -334,80 +354,134 @@
 	proc_tree = options.proc_tree(trace)
 
 	# render bar legend
-	ctx.set_font_size(LEGEND_FONT_SIZE)
+	if trace.cpu_stats:
+		ctx.set_font_size(LEGEND_FONT_SIZE)
 
-	draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
-	draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+		draw_legend_box(ctx, "CPU (user+sys)", CPU_COLOR, off_x, curr_y+20, leg_s)
+		draw_legend_box(ctx, "I/O (wait)", IO_COLOR, off_x + 120, curr_y+20, leg_s)
 
-	# render I/O wait
-	chart_rect = (off_x, curr_y+30, w, bar_h)
-	if clip_visible (clip, chart_rect):
-		draw_box_ticks (ctx, chart_rect, sec_w)
-		draw_annotations (ctx, proc_tree, trace.times, chart_rect)
-		draw_chart (ctx, IO_COLOR, True, chart_rect, \
-			    [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
-			    proc_tree, None)
-		# render CPU load
-		draw_chart (ctx, CPU_COLOR, True, chart_rect, \
-			    [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
-			    proc_tree, None)
+		# render I/O wait
+		chart_rect = (off_x, curr_y+30, w, bar_h)
+		if clip_visible (clip, chart_rect):
+			draw_box_ticks (ctx, chart_rect, sec_w)
+			draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+			draw_chart (ctx, IO_COLOR, True, chart_rect, \
+				    [(sample.time, sample.user + sample.sys + sample.io) for sample in trace.cpu_stats], \
+				    proc_tree, None)
+			# render CPU load
+			draw_chart (ctx, CPU_COLOR, True, chart_rect, \
+				    [(sample.time, sample.user + sample.sys) for sample in trace.cpu_stats], \
+				    proc_tree, None)
 
-	curr_y = curr_y + 30 + bar_h
+		curr_y = curr_y + 30 + bar_h
 
 	# render second chart
-	draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
-	draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
+	if trace.disk_stats:
+		draw_legend_line(ctx, "Disk throughput", DISK_TPUT_COLOR, off_x, curr_y+20, leg_s)
+		draw_legend_box(ctx, "Disk utilization", IO_COLOR, off_x + 120, curr_y+20, leg_s)
 
-        # render I/O utilization
-	chart_rect = (off_x, curr_y+30, w, bar_h)
-	if clip_visible (clip, chart_rect):
-		draw_box_ticks (ctx, chart_rect, sec_w)
-		draw_annotations (ctx, proc_tree, trace.times, chart_rect)
-		draw_chart (ctx, IO_COLOR, True, chart_rect, \
-			    [(sample.time, sample.util) for sample in trace.disk_stats], \
-			    proc_tree, None)
+		# render I/O utilization
+		chart_rect = (off_x, curr_y+30, w, bar_h)
+		if clip_visible (clip, chart_rect):
+			draw_box_ticks (ctx, chart_rect, sec_w)
+			draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+			draw_chart (ctx, IO_COLOR, True, chart_rect, \
+				    [(sample.time, sample.util) for sample in trace.disk_stats], \
+				    proc_tree, None)
 
-	# render disk throughput
-	max_sample = max (trace.disk_stats, key = lambda s: s.tput)
-	if clip_visible (clip, chart_rect):
-		draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
-			    [(sample.time, sample.tput) for sample in trace.disk_stats], \
-			    proc_tree, None)
+		# render disk throughput
+		max_sample = max (trace.disk_stats, key = lambda s: s.tput)
+		if clip_visible (clip, chart_rect):
+			draw_chart (ctx, DISK_TPUT_COLOR, False, chart_rect, \
+				    [(sample.time, sample.tput) for sample in trace.disk_stats], \
+				    proc_tree, None)
 
-	pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
+		pos_x = off_x + ((max_sample.time - proc_tree.start_time) * w / proc_tree.duration)
 
-	shift_x, shift_y = -20, 20
-	if (pos_x < off_x + 245):
-		shift_x, shift_y = 5, 40
+		shift_x, shift_y = -20, 20
+		if (pos_x < off_x + 245):
+			shift_x, shift_y = 5, 40
 
-	label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
-	draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
+		label = "%dMB/s" % round ((max_sample.tput) / 1024.0)
+		draw_text (ctx, label, DISK_TPUT_COLOR, pos_x + shift_x, curr_y + shift_y)
 
-	curr_y = curr_y + 30 + bar_h
+		curr_y = curr_y + 30 + bar_h
+
+	# render disk space usage
+	#
+	# Draws the amount of disk space used on each volume relative to the
+	# lowest recorded amount. The graphs for each volume are stacked above
+	# each other so that total disk usage is visible.
+	if trace.monitor_disk:
+		ctx.set_font_size(LEGEND_FONT_SIZE)
+		# Determine set of volumes for which we have
+		# information and the minimal amount of used disk
+		# space for each. Currently samples are allowed to
+		# not have values for all volumes; drawing could be
+		# made more efficient if that wasn't the case.
+		volumes = set()
+		min_used = {}
+		for sample in trace.monitor_disk:
+			for volume, used in sample.records.items():
+				volumes.add(volume)
+				if volume not in min_used or min_used[volume] > used:
+					min_used[volume] = used
+		volumes = sorted(list(volumes))
+		disk_scale = 0
+		for i, volume in enumerate(volumes):
+			volume_scale = max([sample.records[volume] - min_used[volume]
+			                    for sample in trace.monitor_disk
+			                    if volume in sample.records])
+			# Does not take length of volume name into account, but fixed offset
+			# works okay in practice.
+			draw_legend_box(ctx, '%s (max: %u MiB)' % (volume, volume_scale / 1024 / 1024),
+			                VOLUME_COLORS[i % len(VOLUME_COLORS)],
+			                off_x + i * 250, curr_y+20, leg_s)
+			disk_scale += volume_scale
+
+		# render used amount of disk space
+		chart_rect = (off_x, curr_y+30, w, bar_h)
+		if clip_visible (clip, chart_rect):
+			draw_box_ticks (ctx, chart_rect, sec_w)
+			draw_annotations (ctx, proc_tree, trace.times, chart_rect)
+			for i in range(len(volumes), 0, -1):
+				draw_chart (ctx, VOLUME_COLORS[(i - 1) % len(VOLUME_COLORS)], True, chart_rect, \
+				            [(sample.time,
+				              # Sum up used space of all volumes including the current one
+				              # so that the graphs appear as stacked on top of each other.
+				              reduce(lambda x,y: x+y,
+				                     [sample.records[volume] - min_used[volume]
+				                      for volume in volumes[0:i]
+				                      if volume in sample.records],
+				                     0))
+				             for sample in trace.monitor_disk], \
+				            proc_tree, [0, disk_scale])
+
+		curr_y = curr_y + 30 + bar_h
 
 	# render mem usage
 	chart_rect = (off_x, curr_y+30, w, meminfo_bar_h)
 	mem_stats = trace.mem_stats
 	if mem_stats and clip_visible (clip, chart_rect):
-		mem_scale = max(sample.records['MemTotal'] - sample.records['MemFree'] for sample in mem_stats)
+		mem_scale = max(sample.buffers for sample in mem_stats)
 		draw_legend_box(ctx, "Mem cached (scale: %u MiB)" % (float(mem_scale) / 1024), MEM_CACHED_COLOR, off_x, curr_y+20, leg_s)
 		draw_legend_box(ctx, "Used", MEM_USED_COLOR, off_x + 240, curr_y+20, leg_s)
 		draw_legend_box(ctx, "Buffers", MEM_BUFFERS_COLOR, off_x + 360, curr_y+20, leg_s)
-		draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.records['SwapTotal'] - sample.records['SwapFree'])/1024 for sample in mem_stats]), \
+		draw_legend_line(ctx, "Swap (scale: %u MiB)" % max([(sample.swap)/1024 for sample in mem_stats]), \
 				 MEM_SWAP_COLOR, off_x + 480, curr_y+20, leg_s)
 		draw_box_ticks(ctx, chart_rect, sec_w)
 		draw_annotations(ctx, proc_tree, trace.times, chart_rect)
 		draw_chart(ctx, MEM_BUFFERS_COLOR, True, chart_rect, \
-			   [(sample.time, sample.records['MemTotal'] - sample.records['MemFree']) for sample in trace.mem_stats], \
+			   [(sample.time, sample.buffers) for sample in trace.mem_stats], \
 			   proc_tree, [0, mem_scale])
 		draw_chart(ctx, MEM_USED_COLOR, True, chart_rect, \
-			   [(sample.time, sample.records['MemTotal'] - sample.records['MemFree'] - sample.records['Buffers']) for sample in mem_stats], \
+			   [(sample.time, sample.used) for sample in mem_stats], \
 			   proc_tree, [0, mem_scale])
 		draw_chart(ctx, MEM_CACHED_COLOR, True, chart_rect, \
-			   [(sample.time, sample.records['Cached']) for sample in mem_stats], \
+			   [(sample.time, sample.cached) for sample in mem_stats], \
 			   proc_tree, [0, mem_scale])
 		draw_chart(ctx, MEM_SWAP_COLOR, False, chart_rect, \
-			   [(sample.time, float(sample.records['SwapTotal'] - sample.records['SwapFree'])) for sample in mem_stats], \
+			   [(sample.time, float(sample.swap)) for sample in mem_stats], \
 			   proc_tree, None)
 
 		curr_y = curr_y + meminfo_bar_h
@@ -415,7 +489,7 @@
 	return curr_y
 
 def render_processes_chart(ctx, options, trace, curr_y, w, h, sec_w):
-        chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - (curr_y+header_h) + proc_h]
+        chart_rect = [off_x, curr_y+header_h, w, h - 2 * off_y - header_h - leg_s + proc_h]
 
 	draw_legend_box (ctx, "Configure", \
 			 TASK_COLOR_CONFIGURE, off_x  , curr_y + 45, leg_s)
@@ -496,6 +570,9 @@
 	w -= 2*off_x
 	curr_y = off_y;
 
+	if options.charts:
+		curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
+
 	curr_y = render_processes_chart (ctx, options, trace, curr_y, w, h, sec_w)
 
 	return
@@ -513,9 +590,6 @@
 	else:
 		curr_y = off_y;
 
-	if options.charts:
-		curr_y = render_charts (ctx, options, clip, trace, curr_y, w, h, sec_w)
-
 	# draw process boxes
 	proc_height = h
 	if proc_tree.taskstats and options.cumulative:
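A note on the stacked disk-space rendering added above: for volume i the plotted value is the sum of the usage deltas of volumes 0..i, and the layers are drawn from the last volume back to the first so that each chart paints over the taller stack beneath it. A minimal standalone sketch of that accumulation, with invented volume names and sample data:

from functools import reduce

# Invented stand-ins for trace.monitor_disk data.
volumes = ['/', '/home']                          # sorted volume names
min_used = {'/': 100, '/home': 50}                # per-volume minimum seen
samples = [{'/': 120, '/home': 70},               # one dict per timed sample
           {'/': 150, '/home': 90}]

def stacked(records, upto):
    # Sum the deltas of volumes[0:upto] so layer i sits on top of layers < i.
    return reduce(lambda x, y: x + y,
                  [records[v] - min_used[v] for v in volumes[:upto]
                   if v in records], 0)

for i in range(len(volumes), 0, -1):              # draw back-to-front
    print(volumes[i - 1], [stacked(s, i) for s in samples])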
diff --git a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py
index a3a0b0b..bcfb2da 100644
--- a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py
+++ b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/parsing.py
@@ -38,16 +38,18 @@
         self.min = None
         self.max = None
         self.headers = None
-        self.disk_stats = None
+        self.disk_stats = []
         self.ps_stats = None
         self.taskstats = None
-        self.cpu_stats = None
+        self.cpu_stats = []
         self.cmdline = None
         self.kernel = None
         self.kernel_tree = None
         self.filename = None
         self.parent_map = None
-        self.mem_stats = None
+        self.mem_stats = []
+        self.monitor_disk = None
+        self.times = [] # Always empty, but expected by draw.py when drawing system charts.
 
         if len(paths):
             parse_paths (writer, self, paths)
@@ -58,6 +60,19 @@
                 self.min = min(self.start.keys())
                 self.max = max(self.end.keys())
 
+
+        # Rendering system charts depends on start and end
+        # time. Provide them where the original drawing code expects
+        # them, i.e. in proc_tree.
+        class BitbakeProcessTree:
+            def __init__(self, start_time, end_time):
+                self.start_time = start_time
+                self.end_time = end_time
+                self.duration = self.end_time - self.start_time
+        self.proc_tree = BitbakeProcessTree(min(self.start.keys()),
+                                            max(self.end.keys()))
+
+
         return
 
         # Turn that parsed information into something more useful
@@ -427,7 +442,13 @@
         # skip the rest of statistics lines
     return samples
 
-def _parse_proc_disk_stat_log(file, numCpu):
+def _parse_reduced_log(file, sample_class):
+    samples = []
+    for time, lines in _parse_timed_blocks(file):
+        samples.append(sample_class(time, *[float(x) for x in lines[0].split()]))
+    return samples
+
+def _parse_proc_disk_stat_log(file):
     """
     Parse file for disk stats, but only look at the whole device, eg. sda,
     not sda1, sda2 etc. The format of relevant lines should be:
@@ -462,12 +483,31 @@
         sums = [ a - b for a, b in zip(sample1.diskdata, sample2.diskdata) ]
         readTput = sums[0] / 2.0 * 100.0 / interval
         writeTput = sums[1] / 2.0 * 100.0 / interval
-        util = float( sums[2] ) / 10 / interval / numCpu
+        util = float( sums[2] ) / 10 / interval
         util = max(0.0, min(1.0, util))
         disk_stats.append(DiskSample(sample2.time, readTput, writeTput, util))
 
     return disk_stats
 
+def _parse_reduced_proc_meminfo_log(file):
+    """
+    Parse file for global memory statistics with
+    'MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree' values
+    (in that order) directly stored on one line.
+    """
+    used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)
+
+    mem_stats = []
+    for time, lines in _parse_timed_blocks(file):
+        sample = MemSample(time)
+        for name, value in zip(used_values, lines[0].split()):
+            sample.add_value(name, int(value))
+
+        if sample.valid():
+            mem_stats.append(DrawMemSample(sample))
+
+    return mem_stats
+
 def _parse_proc_meminfo_log(file):
     """
     Parse file for global memory statistics.
@@ -484,14 +524,37 @@
         for line in lines:
             match = meminfo_re.match(line)
             if not match:
-                raise ParseError("Invalid meminfo line \"%s\"" % match.groups(0))
+                raise ParseError("Invalid meminfo line \"%s\"" % line)
             sample.add_value(match.group(1), int(match.group(2)))
 
         if sample.valid():
-            mem_stats.append(sample)
+            mem_stats.append(DrawMemSample(sample))
 
     return mem_stats
 
+def _parse_monitor_disk_log(file):
+    """
+    Parse file with information about amount of diskspace used.
+    The format of relevant lines should be: <volume path>: <number-of-bytes>
+    """
+    disk_stats = []
+    diskinfo_re = re.compile(r'^(.+):\s*(\d+)$')
+
+    for time, lines in _parse_timed_blocks(file):
+        sample = DiskSpaceSample(time)
+
+        for line in lines:
+            match = diskinfo_re.match(line)
+            if not match:
+                raise ParseError("Invalid monitor_disk line \"%s\"" % line)
+            sample.add_value(match.group(1), int(match.group(2)))
+
+        if sample.valid():
+            disk_stats.append(sample)
+
+    return disk_stats
+
+
 # if we boot the kernel with: initcall_debug printk.time=1 we can
 # get all manner of interesting data from the dmesg output
 # We turn this into a pseudo-process tree: each event is
@@ -628,6 +691,20 @@
             cmdLines[pid] = values
     return cmdLines
 
+def _parse_bitbake_buildstats(writer, state, filename, file):
+    paths = filename.split("/")
+    task = paths[-1]
+    pn = paths[-2]
+    start = None
+    end = None
+    for line in file:
+        if line.startswith("Started:"):
+            start = int(float(line.split()[-1]))
+        elif line.startswith("Ended:"):
+            end = int(float(line.split()[-1]))
+    if start and end:
+        state.add_process(pn + ":" + task, start, end)
+
 def get_num_cpus(headers):
     """Get the number of CPUs from the system.cpu header property. As the
     CPU utilization graphs are relative, the number of CPUs currently makes
@@ -647,18 +724,25 @@
 def _do_parse(writer, state, filename, file):
     writer.info("parsing '%s'" % filename)
     t1 = clock()
-    paths = filename.split("/")
-    task = paths[-1]
-    pn = paths[-2]
-    start = None
-    end = None
-    for line in file:
-        if line.startswith("Started:"):
-            start = int(float(line.split()[-1]))
-        elif line.startswith("Ended:"):
-            end = int(float(line.split()[-1]))
-    if start and end:
-        state.add_process(pn + ":" + task, start, end)
+    name = os.path.basename(filename)
+    if name == "proc_diskstats.log":
+        state.disk_stats = _parse_proc_disk_stat_log(file)
+    elif name == "reduced_proc_diskstats.log":
+        state.disk_stats = _parse_reduced_log(file, DiskSample)
+    elif name == "proc_stat.log":
+        state.cpu_stats = _parse_proc_stat_log(file)
+    elif name == "reduced_proc_stat.log":
+        state.cpu_stats = _parse_reduced_log(file, CPUSample)
+    elif name == "proc_meminfo.log":
+        state.mem_stats = _parse_proc_meminfo_log(file)
+    elif name == "reduced_proc_meminfo.log":
+        state.mem_stats = _parse_reduced_proc_meminfo_log(file)
+    elif name == "cmdline2.log":
+        state.cmdline = _parse_cmdline_log(writer, file)
+    elif name == "monitor_disk.log":
+        state.monitor_disk = _parse_monitor_disk_log(file)
+    elif not filename.endswith('.log'):
+        _parse_bitbake_buildstats(writer, state, filename, file)
     t2 = clock()
     writer.info("  %s seconds" % str(t2-t1))
     return state
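The reduced_* logs handled above collapse each timed block to a single line of whitespace-separated numbers, which _parse_reduced_log() splats straight into the sample constructor. A toy version of that flow (the stub class and block contents are invented for illustration):

# Toy _parse_reduced_log(): blocks are (time, [line, ...]) pairs and the
# first line of each block carries the numeric fields in a fixed order.
class CPUSampleStub:
    def __init__(self, time, user, sys, io):
        self.time, self.user, self.sys, self.io = time, user, sys, io

def parse_reduced(blocks, sample_class):
    # 'blocks' stands in for the output of _parse_timed_blocks(file).
    return [sample_class(time, *[float(x) for x in lines[0].split()])
            for time, lines in blocks]

samples = parse_reduced([(100, ["0.5 0.2 0.1"]),
                         (101, ["0.6 0.1 0.0"])], CPUSampleStub)
print([(s.time, s.user, s.io) for s in samples])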
diff --git a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/samples.py b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/samples.py
index 015d743..9fc309b 100644
--- a/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/samples.py
+++ b/import-layers/yocto-poky/scripts/pybootchartgui/pybootchartgui/samples.py
@@ -53,6 +53,33 @@
         # discard incomplete samples
         return [v for v in MemSample.used_values if v not in keys] == []
 
+class DrawMemSample:
+    """
+    Condensed version of a MemSample with exactly the values used by the drawing code.
+    Initialized either from a valid MemSample or
+    a tuple/list of buffer/used/cached/swap values.
+    """
+    def __init__(self, mem_sample):
+        self.time = mem_sample.time
+        if isinstance(mem_sample, MemSample):
+            self.buffers = mem_sample.records['MemTotal'] - mem_sample.records['MemFree']
+            self.used = mem_sample.records['MemTotal'] - mem_sample.records['MemFree'] - mem_sample.records['Buffers']
+            self.cached = mem_sample.records['Cached']
+            self.swap = mem_sample.records['SwapTotal'] - mem_sample.records['SwapFree']
+        else:
+            self.buffers, self.used, self.cached, self.swap = mem_sample
+
+class DiskSpaceSample:
+    def __init__(self, time):
+        self.time = time
+        self.records = {}
+
+    def add_value(self, name, value):
+        self.records[name] = value
+
+    def valid(self):
+        return bool(self.records)
+
 class ProcessSample:
     def __init__(self, time, state, cpu_sample):
         self.time = time
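The condensation DrawMemSample performs above is plain arithmetic over the /proc/meminfo counters; the same formulas restated outside the class, with invented values in KiB:

# DrawMemSample's formulas, restated standalone (all values invented, KiB).
records = {'MemTotal': 2048000, 'MemFree': 1024000, 'Buffers': 51200,
           'Cached': 256000, 'SwapTotal': 524288, 'SwapFree': 524288}

buffers = records['MemTotal'] - records['MemFree']   # memory in use
used = buffers - records['Buffers']                  # in use, minus buffers
cached = records['Cached']
swap = records['SwapTotal'] - records['SwapFree']
print(buffers, used, cached, swap)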
diff --git a/import-layers/yocto-poky/scripts/recipetool b/import-layers/yocto-poky/scripts/recipetool
index 1052cd2..3765ec7 100755
--- a/import-layers/yocto-poky/scripts/recipetool
+++ b/import-layers/yocto-poky/scripts/recipetool
@@ -73,13 +73,14 @@
         logger.error("Unable to find bitbake by searching parent directory of this script or PATH")
         sys.exit(1)
     logger.debug('Found bitbake path: %s' % bitbakepath)
+    scriptpath.add_oe_lib_path()
 
     scriptutils.logger_setup_color(logger, global_args.color)
 
     tinfoil = tinfoil_init(False)
     try:
-        for path in ([scripts_path] +
-                    tinfoil.config_data.getVar('BBPATH', True).split(':')):
+        for path in (tinfoil.config_data.getVar('BBPATH').split(':')
+                     + [scripts_path]):
             pluginpath = os.path.join(path, 'lib', 'recipetool')
             scriptutils.load_plugins(logger, plugins, pluginpath)
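The reordering above puts BBPATH entries ahead of the scripts directory, so layer-provided recipetool plugins are picked up first (and, assuming duplicately named plugins are skipped once loaded, take preference). The resulting search list is easy to see in isolation, with example paths:

import os

# Example values; at runtime BBPATH comes from tinfoil.config_data.
scripts_path = '/home/user/poky/scripts'
bbpath = '/home/user/build:/home/user/poky/meta'

search = bbpath.split(':') + [scripts_path]
print([os.path.join(p, 'lib', 'recipetool') for p in search])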
 
diff --git a/import-layers/yocto-poky/scripts/relocate_sdk.py b/import-layers/yocto-poky/scripts/relocate_sdk.py
index e47b4d9..c752fa2 100755
--- a/import-layers/yocto-poky/scripts/relocate_sdk.py
+++ b/import-layers/yocto-poky/scripts/relocate_sdk.py
@@ -103,6 +103,8 @@
                fname.startswith(b("/lib32/")) or fname.startswith(b("/usr/lib32/")) or \
                fname.startswith(b("/usr/lib32/")) or fname.startswith(b("/usr/lib64/")):
                 break
+            if p_filesz == 0:
+                break
             if (len(new_dl_path) >= p_filesz):
                 print("ERROR: could not relocate %s, interp size = %i and %i is needed." \
                     % (elf_file_name, p_memsz, len(new_dl_path) + 1))
diff --git a/import-layers/yocto-poky/scripts/rpm2cpio.sh b/import-layers/yocto-poky/scripts/rpm2cpio.sh
index 5df8c0f..cf23472 100755
--- a/import-layers/yocto-poky/scripts/rpm2cpio.sh
+++ b/import-layers/yocto-poky/scripts/rpm2cpio.sh
@@ -1,53 +1,55 @@
-#!/bin/sh
+#!/bin/sh -efu
 
-# This comes from the RPM5 5.4.0 distribution.
+# This file comes from rpm 4.x distribution
 
-pkg=$1
-if [ "$pkg" = "" -o ! -e "$pkg" ]; then
-    echo "no package supplied" 1>&2
-   exit 1
-fi
+fatal() {
+	echo "$*" >&2
+	exit 1
+}
 
-leadsize=96
-o=`expr $leadsize + 8`
-set `od -j $o -N 8 -t u1 $pkg`
-il=`expr 256 \* \( 256 \* \( 256 \* $2 + $3 \) + $4 \) + $5`
-dl=`expr 256 \* \( 256 \* \( 256 \* $6 + $7 \) + $8 \) + $9`
-# echo "sig il: $il dl: $dl"
+pkg="$1"
+[ -n "$pkg" -a -e "$pkg" ] ||
+	fatal "No package supplied"
 
-sigsize=`expr 8 + 16 \* $il + $dl`
-o=`expr $o + $sigsize + \( 8 - \( $sigsize \% 8 \) \) \% 8 + 8`
-set `od -j $o -N 8 -t u1 $pkg`
-il=`expr 256 \* \( 256 \* \( 256 \* $2 + $3 \) + $4 \) + $5`
-dl=`expr 256 \* \( 256 \* \( 256 \* $6 + $7 \) + $8 \) + $9`
-# echo "hdr il: $il dl: $dl"
+_dd() {
+	local o="$1"; shift
+	dd if="$pkg" skip="$o" iflag=skip_bytes status=none $*
+}
 
-hdrsize=`expr 8 + 16 \* $il + $dl`
-o=`expr $o + $hdrsize`
-EXTRACTOR="dd if=$pkg ibs=$o skip=1"
+calcsize() {
+	offset=$(($1 + 8))
 
-COMPRESSION=`($EXTRACTOR |file -) 2>/dev/null`
-if echo $COMPRESSION |grep -iq gzip; then
-	DECOMPRESSOR=gunzip
-elif echo $COMPRESSION |grep -iq bzip2; then
-	DECOMPRESSOR=bunzip2
-elif echo $COMPRESSION |grep -iq xz; then
-	DECOMPRESSOR=unxz
-elif echo $COMPRESSION |grep -iq cpio; then
-	DECOMPRESSOR=cat
-else
-	# Most versions of file don't support LZMA, therefore we assume
-	# anything not detected is LZMA
-	DECOMPRESSOR=`which unlzma 2>/dev/null`
-	case "$DECOMPRESSOR" in
-	    /* ) ;;
-	    *  ) DECOMPRESSOR=`which lzmash 2>/dev/null`
-	         case "$DECOMPRESSOR" in
-	             /* ) DECOMPRESSOR="lzmash -d -c" ;;
-	             *  ) DECOMPRESSOR=cat ;;
-	         esac
-	         ;;
-	esac
-fi
+	local i b b0 b1 b2 b3 b4 b5 b6 b7
 
-$EXTRACTOR 2>/dev/null | $DECOMPRESSOR
+	i=0
+	while [ $i -lt 8 ]; do
+		b="$(_dd $(($offset + $i)) bs=1 count=1)"
+		[ -z "$b" ] &&
+			b="0" ||
+			b="$(exec printf '%u\n' "'$b")"
+		eval "b$i=\$b"
+		i=$(($i + 1))
+	done
+
+	rsize=$((8 + ((($b0 << 24) + ($b1 << 16) + ($b2 << 8) + $b3) << 4) + ($b4 << 24) + ($b5 << 16) + ($b6 << 8) + $b7))
+	offset=$(($offset + $rsize))
+}
+
+case "$(_dd 0 bs=8 count=1)" in
+	"$(printf '\355\253\356\333')"*) ;; # '\xed\xab\xee\xdb'
+	*) fatal "File doesn't look like rpm: $pkg" ;;
+esac
+
+calcsize 96
+sigsize=$rsize
+
+calcsize $(($offset + (8 - ($sigsize % 8)) % 8))
+hdrsize=$rsize
+
+case "$(_dd $offset bs=3 count=1)" in
+	"$(printf '\102\132')"*) _dd $offset | bunzip2 ;; # '\x42\x5a'
+	"$(printf '\037\213')"*) _dd $offset | gunzip  ;; # '\x1f\x8b'
+	"$(printf '\375\067')"*) _dd $offset | xzcat   ;; # '\xfd\x37'
+	"$(printf '\135\000')"*) _dd $offset | unlzma  ;; # '\x5d\x00'
+	*) fatal "Unrecognized rpm file: $pkg" ;;
+esac
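The rewritten script walks the package as a 96-byte lead, a signature header, then the main header; each header stores a 4-byte big-endian index-entry count and data-section size at offset 8, so a header region occupies 16 + 16*count + size bytes (the shell's `<< 4` is the multiply by 16). A sketch of the same walk in Python, assuming a well-formed v3 package (the file name is hypothetical):

import struct

def header_region(buf, offset):
    # 8 bytes of magic/version/reserved, then entry count and data size.
    count, size = struct.unpack_from('>II', buf, offset + 8)
    return 16 + 16 * count + size

with open('package.rpm', 'rb') as f:        # hypothetical package name
    data = f.read()
assert data[:4] == b'\xed\xab\xee\xdb', "not an rpm"

offset = 96                                 # fixed-size lead
offset += header_region(data, offset)       # signature header...
offset += (8 - offset % 8) % 8              # ...padded to 8-byte alignment
offset += header_region(data, offset)       # main header
print("compressed cpio payload starts at", offset)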
diff --git a/import-layers/yocto-poky/scripts/runqemu b/import-layers/yocto-poky/scripts/runqemu
index 6748cb2..9b6d330 100755
--- a/import-layers/yocto-poky/scripts/runqemu
+++ b/import-layers/yocto-poky/scripts/runqemu
@@ -74,17 +74,19 @@
     kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required)
     publicvnc - enable a VNC server open to all hosts
     audio - enable audio
+    [*/]ovmf* - OVMF firmware file or base name for booting with UEFI
   tcpserial=<port> - specify tcp serial port number
   biosdir=<dir> - specify custom bios dir
   biosfilename=<filename> - specify bios filename
   qemuparams=<xyz> - specify custom parameters to QEMU
   bootparams=<xyz> - specify custom kernel parameters during boot
-  help: print this text
+  help, -h, --help: print this text
 
 Examples:
+  runqemu
   runqemu qemuarm
   runqemu tmp/deploy/images/qemuarm
-  runqemu tmp/deploy/images/qemux86/.qemuboot.conf
+  runqemu tmp/deploy/images/qemux86/<qemuboot.conf>
   runqemu qemux86-64 core-image-sato ext4
   runqemu qemux86-64 wic-image-minimal wic
   runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial
@@ -96,7 +98,7 @@
 """)
 
 def check_tun():
-    """Check /dev/net/run"""
+    """Check /dev/net/tun"""
     dev_tun = '/dev/net/tun'
     if not os.path.exists(dev_tun):
         raise Exception("TUN control device %s is unavailable; you may need to enable TUN (e.g. sudo modprobe tun)" % dev_tun)
@@ -147,21 +149,46 @@
                     return f
     return ''
 
+def check_free_port(host, port):
+    """ Check whether the port is free or not """
+    import socket
+    from contextlib import closing
+
+    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+        if sock.connect_ex((host, port)) == 0:
+            # Port is open, so not free
+            return False
+        else:
+            # Port is not open, so free
+            return True
+
 class BaseConfig(object):
     def __init__(self):
-        # Vars can be merged with .qemuboot.conf, use a dict to manage them.
-        self.d = {
-            'MACHINE': '',
-            'DEPLOY_DIR_IMAGE': '',
-            'QB_KERNEL_ROOT': '/dev/vda',
-        }
+        # self.d holds the vars saved via self.set(); some of them come from qemuboot.conf
+        self.d = {'QB_KERNEL_ROOT': '/dev/vda'}
+
+        # Supported env vars; add a var here if it can be taken from the
+        # environment, and don't use os.getenv elsewhere in the code.
+        self.env_vars = ('MACHINE',
+                        'ROOTFS',
+                        'KERNEL',
+                        'DEPLOY_DIR_IMAGE',
+                        'OE_TMPDIR',
+                        'OECORE_NATIVE_SYSROOT',
+                        )
 
         self.qemu_opt = ''
         self.qemu_opt_script = ''
-        self.nfs_dir = ''
         self.clean_nfs_dir = False
         self.nfs_server = ''
         self.rootfs = ''
+        # File name(s) of an OVMF firmware file or variable store,
+        # to be added with -drive if=pflash.
+        # Found in the same places as the rootfs, with or without one of
+        # these suffixes: qcow2, bin.
+        # Setting one also adds "-vga std" because that is all that
+        # OVMF supports.
+        self.ovmf_bios = []
         self.qemuboot = ''
         self.qbconfload = False
         self.kernel = ''
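check_free_port() above treats a successful connect as "something is already listening", so a port counts as free only when the connection attempt fails. A quick standalone probe of the same idea (host and port are arbitrary):

import socket
from contextlib import closing

# connect_ex() returns 0 only if a listener accepts; non-zero means free.
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
    free = sock.connect_ex(('localhost', 2222)) != 0
print('port 2222 is', 'free' if free else 'taken')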
@@ -187,6 +214,15 @@
         self.snapshot = False
         self.fstypes = ('ext2', 'ext3', 'ext4', 'jffs2', 'nfs', 'btrfs', 'cpio.gz', 'cpio', 'ramfs')
         self.vmtypes = ('hddimg', 'hdddirect', 'wic', 'vmdk', 'qcow2', 'vdi', 'iso')
+        self.network_device = "-device e1000,netdev=net0,mac=@MAC@"
+        # Use different MAC prefixes for tap and slirp to avoid
+        # conflicts, e.g., when one instance is running with tap and
+        # another is running with slirp.
+        # The last octet is dynamic, to avoid conflicts when multiple
+        # qemus are running, e.g., multiple tap or slirp qemus.
+        self.mac_tap = "52:54:00:12:34:"
+        self.mac_slirp = "52:54:00:12:35:"
 
     def acquire_lock(self):
         logger.info("Acquiring lockfile %s..." % self.lock)
@@ -208,6 +244,8 @@
     def get(self, key):
         if key in self.d:
             return self.d.get(key)
+        elif os.getenv(key):
+            return os.getenv(key)
         else:
             return ''
 
@@ -219,7 +257,7 @@
             if not re.search('.qemuboot.conf$', '\n'.join(os.listdir(p)), re.M):
                 logger.info("Can't find required *.qemuboot.conf in %s" % p)
                 return False
-            if not re.search('-image-', '\n'.join(os.listdir(p))):
+            if not any(map(lambda name: '-image-' in name, os.listdir(p))):
                 logger.info("Can't find *-image-* in %s" % p)
                 return False
             return True
@@ -246,12 +284,11 @@
 
     def check_arg_nfs(self, p):
         if os.path.isdir(p):
-            self.nfs_dir = p
+            self.rootfs = p
         else:
             m = re.match('(.*):(.*)', p)
             self.nfs_server = m.group(1)
-            self.nfs_dir = m.group(2)
-        self.rootfs = ""
+            self.rootfs = m.group(2)
         self.check_arg_fstype('nfs')
 
     def check_arg_path(self, p):
@@ -260,6 +297,7 @@
         - Check whether is a kernel file
         - Check whether is a image file
         - Check whether it is a nfs dir
+        - Check whether it is an OVMF flash file
         """
         if p.endswith('.qemuboot.conf'):
             self.qemuboot = p
@@ -268,37 +306,52 @@
              re.search('zImage', p) or re.search('vmlinux', p) or \
              re.search('fitImage', p) or re.search('uImage', p):
             self.kernel =  p
-        elif os.path.exists(p) and (not os.path.isdir(p)) and re.search('-image-', os.path.basename(p)):
+        elif os.path.exists(p) and (not os.path.isdir(p)) and '-image-' in os.path.basename(p):
             self.rootfs = p
-            dirpath = os.path.dirname(p)
-            m = re.search('(.*)\.(.*)$', p)
-            if m:
-                qb = '%s%s' % (re.sub('\.rootfs$', '', m.group(1)), '.qemuboot.conf')
+            # Check the filename against self.fstypes so we can handle <file>.cpio.gz;
+            # otherwise, its type would be "gz", which is incorrect.
+            fst = ""
+            for t in self.fstypes:
+                if p.endswith(t):
+                    fst = t
+                    break
+            if not fst:
+                m = re.search('.*\.(.*)$', self.rootfs)
+                if m:
+                    fst =  m.group(1)
+            if fst:
+                self.check_arg_fstype(fst)
+                qb = re.sub('\.' + fst + "$", '', self.rootfs)
+                qb = '%s%s' % (re.sub('\.rootfs$', '', qb), '.qemuboot.conf')
                 if os.path.exists(qb):
                     self.qemuboot = qb
                     self.qbconfload = True
                 else:
                     logger.warn("%s doesn't exist" % qb)
-                fst = m.group(2)
-                self.check_arg_fstype(fst)
             else:
                 raise Exception("Can't find FSTYPE from: %s" % p)
-        elif os.path.isdir(p) or re.search(':', arg) and re.search('/', arg):
+
+        elif os.path.isdir(p) or re.search(':', p) and re.search('/', p):
             if self.is_deploy_dir_image(p):
                 logger.info('DEPLOY_DIR_IMAGE: %s' % p)
                 self.set("DEPLOY_DIR_IMAGE", p)
             else:
                 logger.info("Assuming %s is an nfs rootfs" % p)
                 self.check_arg_nfs(p)
+        elif os.path.basename(p).startswith('ovmf'):
+            self.ovmf_bios.append(p)
         else:
             raise Exception("Unknown path arg %s" % p)
 
     def check_arg_machine(self, arg):
         """Check whether it is a machine"""
-        if self.get('MACHINE') and self.get('MACHINE') != arg or re.search('/', arg):
-            raise Exception("Unknown arg: %s" % arg)
-        elif self.get('MACHINE') == arg:
+        if self.get('MACHINE') == arg:
             return
+        elif self.get('MACHINE') and self.get('MACHINE') != arg:
+            raise Exception("Conflicting MACHINE: %s vs %s" % (self.get('MACHINE'), arg))
+        elif re.search('/', arg):
+            raise Exception("Unknown arg: %s" % arg)
+
         logger.info('Assuming MACHINE = %s' % arg)
 
         # if we're running under testimage, or similarly as a child
@@ -307,14 +360,14 @@
         # FIXME: testimage.bbclass exports these two variables into env,
         # are there other scenarios in which we need to support being
         # invoked by bitbake?
-        deploy = os.environ.get('DEPLOY_DIR_IMAGE')
-        bbchild = deploy and os.environ.get('OE_TMPDIR')
+        deploy = self.get('DEPLOY_DIR_IMAGE')
+        bbchild = deploy and self.get('OE_TMPDIR')
         if bbchild:
             self.set_machine_deploy_dir(arg, deploy)
             return
         # also check whether we're running under a sourced toolchain
         # environment file
-        if os.environ.get('OECORE_NATIVE_SYSROOT'):
+        if self.get('OECORE_NATIVE_SYSROOT'):
             self.set("MACHINE", arg)
             return
 
@@ -372,11 +425,13 @@
                 self.bootparams = arg[len('bootparams='):]
             elif os.path.exists(arg) or (re.search(':', arg) and re.search('/', arg)):
                 self.check_arg_path(os.path.abspath(arg))
-            elif re.search('-image-', arg):
+            elif re.search(r'-image-|-image$', arg):
                 # Lazy rootfs
                 self.rootfs = arg
+            elif arg.startswith('ovmf'):
+                self.ovmf_bios.append(arg)
             else:
-                # At last, assume is it the MACHINE
+                # At last, assume it is the MACHINE
                 if (not unknown_arg) or unknown_arg == arg:
                     unknown_arg = arg
                 else:
@@ -385,19 +440,20 @@
         if unknown_arg:
             if self.get('MACHINE') == unknown_arg:
                 return
-            if not self.get('DEPLOY_DIR_IMAGE'):
-                # Trying to get DEPLOY_DIR_IMAGE from env.
-                p = os.getenv('DEPLOY_DIR_IMAGE')
-                if p and self.is_deploy_dir_image(p):
-                    machine = os.path.basename(p)
-                    if unknown_arg == machine:
-                        self.set_machine_deploy_dir(machine, p)
-                        return
-                    else:
-                        logger.info('DEPLOY_DIR_IMAGE: %s' % p)
-                        self.set("DEPLOY_DIR_IMAGE", p)
+            if self.get('DEPLOY_DIR_IMAGE'):
+                machine = os.path.basename(self.get('DEPLOY_DIR_IMAGE'))
+                if unknown_arg == machine:
+                    self.set("MACHINE", machine)
+                    return
+
             self.check_arg_machine(unknown_arg)
 
+        if not self.get('DEPLOY_DIR_IMAGE'):
+            self.load_bitbake_env()
+            s = re.search('^DEPLOY_DIR_IMAGE="(.*)"', self.bitbake_e, re.M)
+            if s:
+                self.set("DEPLOY_DIR_IMAGE", s.group(1))
+
     def check_kvm(self):
         """Check kvm and kvm-host"""
         if not (self.kvm_enabled or self.vhost_enabled):
@@ -426,6 +482,11 @@
 
         if os.access(dev_kvm, os.W_OK|os.R_OK):
             self.qemu_opt_script += ' -enable-kvm'
+            if self.get('MACHINE') == "qemux86":
+                # Workaround for broken APIC window on pre 4.15 host kernels which causes boot hangs
+                # See YOCTO #12301
+                # On 64 bit we use x2apic
+                self.kernel_cmdline_script += " clocksource=kvm-clock hpet=disable noapic nolapic"
         else:
             logger.error("You have no read or write permission on /dev/kvm.")
             logger.error("Please change the ownership of this file as described at:")
@@ -454,6 +515,15 @@
     def check_rootfs(self):
         """Check and set rootfs"""
 
+        if self.fstype == "none":
+            return
+
+        if self.get('ROOTFS'):
+            if not self.rootfs:
+                self.rootfs = self.get('ROOTFS')
+            elif self.get('ROOTFS') != self.rootfs:
+                raise Exception("Conflicting ROOTFS: %s vs %s" % (self.get('ROOTFS'), self.rootfs))
+
         if self.fstype == 'nfs':
             return
 
@@ -473,15 +543,36 @@
         if not os.path.exists(self.rootfs):
             raise Exception("Can't find rootfs: %s" % self.rootfs)
 
+    def check_ovmf(self):
+        """Check and set full path for OVMF firmware and variable file(s)."""
+
+        for index, ovmf in enumerate(self.ovmf_bios):
+            if os.path.exists(ovmf):
+                continue
+            for suffix in ('qcow2', 'bin'):
+                path = '%s/%s.%s' % (self.get('DEPLOY_DIR_IMAGE'), ovmf, suffix)
+                if os.path.exists(path):
+                    self.ovmf_bios[index] = path
+                    break
+            else:
+                raise Exception("Can't find OVMF firmware: %s" % ovmf)
+
     def check_kernel(self):
         """Check and set kernel, dtb"""
         # The vm image doesn't need a kernel
         if self.fstype in self.vmtypes:
             return
 
+        # QB_DEFAULT_KERNEL is always a full file path
+        kernel_name = os.path.basename(self.get('QB_DEFAULT_KERNEL'))
+
+        # The user didn't want a kernel to be loaded
+        if kernel_name == "none":
+            return
+
         deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
         if not self.kernel:
-            kernel_match_name = "%s/%s" % (deploy_dir_image, self.get('QB_DEFAULT_KERNEL'))
+            kernel_match_name = "%s/%s" % (deploy_dir_image, kernel_name)
             kernel_match_link = "%s/%s" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
             kernel_startswith = "%s/%s*" % (deploy_dir_image, self.get('KERNEL_IMAGETYPE'))
             cmds = (kernel_match_name, kernel_match_link, kernel_startswith)
@@ -543,7 +634,8 @@
     def check_and_set(self):
         """Check configs sanity and set when needed"""
         self.validate_paths()
-        check_tun()
+        if not self.slirp_enabled:
+            check_tun()
         # Check audio
         if self.audio_enabled:
             if not self.get('QB_AUDIO_DRV'):
@@ -559,6 +651,7 @@
         self.check_kvm()
         self.check_fstype()
         self.check_rootfs()
+        self.check_ovmf()
         self.check_kernel()
         self.check_biosdir()
         self.check_mem()
@@ -568,8 +661,6 @@
         if not self.qemuboot:
             if self.get('DEPLOY_DIR_IMAGE'):
                 deploy_dir_image = self.get('DEPLOY_DIR_IMAGE')
-            elif os.getenv('DEPLOY_DIR_IMAGE'):
-                deploy_dir_image = os.getenv('DEPLOY_DIR_IMAGE')
             else:
                 logger.info("Can't find qemuboot conf file, DEPLOY_DIR_IMAGE is NULL!")
                 return
@@ -586,7 +677,15 @@
                 logger.info('Running %s...' % cmd)
                 qbs = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
                 if qbs:
-                    self.qemuboot = qbs.split()[0]
+                    for qb in qbs.split():
+                        # Don't use an initramfs image when there are other choices, unless fstype is ramfs
+                        if '-initramfs-' in os.path.basename(qb) and self.fstype != 'cpio.gz':
+                            continue
+                        self.qemuboot = qb
+                        break
+                    if not self.qemuboot:
+                        # Use the first one when no choice
+                        self.qemuboot = qbs.split()[0]
                     self.qbconfload = True
 
         if not self.qemuboot:
@@ -595,7 +694,7 @@
             return
 
         if not os.path.exists(self.qemuboot):
-            raise Exception("Failed to find <image>.qemuboot.conf!")
+            raise Exception("Failed to find %s (wrong image name or BSP does not support running under qemu?)." % self.qemuboot)
 
         logger.info('CONFFILE: %s' % self.qemuboot)
 
@@ -611,8 +710,8 @@
         # artefacts are relative to that file, rather than in whatever
         # directory DEPLOY_DIR_IMAGE in the conf file points to.
         if self.qbconfload:
-            imgdir = os.path.dirname(self.qemuboot)
-            if imgdir != self.get('DEPLOY_DIR_IMAGE'):
+            imgdir = os.path.realpath(os.path.dirname(self.qemuboot))
+            if imgdir != os.path.realpath(self.get('DEPLOY_DIR_IMAGE')):
                 logger.info('Setting DEPLOY_DIR_IMAGE to folder containing %s (%s)' % (self.qemuboot, imgdir))
                 self.set('DEPLOY_DIR_IMAGE', imgdir)
 
@@ -627,7 +726,7 @@
                 self.load_bitbake_env()
 
             if self.bitbake_e:
-                native_vars = ['STAGING_DIR_NATIVE', 'STAGING_BINDIR_NATIVE']
+                native_vars = ['STAGING_DIR_NATIVE']
                 for nv in native_vars:
                     s = re.search('^%s="(.*)"' % nv, self.bitbake_e, re.M)
                     if s and s.group(1) != self.get(nv):
@@ -638,8 +737,8 @@
                 # be able to call `bitbake -e`, then try:
                 # - get OE_TMPDIR from environment and guess paths based on it
                 # - get OECORE_NATIVE_SYSROOT from environment (for sdk)
-                tmpdir = os.environ.get('OE_TMPDIR', None)
-                oecore_native_sysroot = os.environ.get('OECORE_NATIVE_SYSROOT', None)
+                tmpdir = self.get('OE_TMPDIR')
+                oecore_native_sysroot = self.get('OECORE_NATIVE_SYSROOT')
                 if tmpdir:
                     logger.info('Setting STAGING_DIR_NATIVE and STAGING_BINDIR_NATIVE relative to OE_TMPDIR (%s)' % tmpdir)
                     hostos, _, _, _, machine = os.uname()
@@ -664,9 +763,11 @@
         print('MACHINE: [%s]' % self.get('MACHINE'))
         print('FSTYPE: [%s]' % self.fstype)
         if self.fstype  == 'nfs':
-            print('NFS_DIR: [%s]' % self.nfs_dir)
+            print('NFS_DIR: [%s]' % self.rootfs)
         else:
             print('ROOTFS: [%s]' % self.rootfs)
+        if self.ovmf_bios:
+            print('OVMF: %s' % self.ovmf_bios)
         print('CONFFILE: [%s]' % self.qemuboot)
         print('')
 
@@ -707,13 +808,13 @@
 
         self.unfs_opts="nfsvers=3,port=%s,mountprog=%s,nfsprog=%s,udp,mountport=%s" % (nfsd_port, mountd_rpcport, nfsd_rpcport, mountd_port)
 
-        # Extract .tar.bz2 or .tar.bz if no self.nfs_dir
-        if not self.nfs_dir:
+        # Extract .tar.bz2 or .tar.bz if no nfs dir
+        if not (self.rootfs and os.path.isdir(self.rootfs)):
             src_prefix = '%s/%s' % (self.get('DEPLOY_DIR_IMAGE'), self.get('IMAGE_LINK_NAME'))
             dest = "%s-nfsroot" % src_prefix
             if os.path.exists('%s.pseudo_state' % dest):
                 logger.info('Use %s as NFS_DIR' % dest)
-                self.nfs_dir = dest
+                self.rootfs = dest
             else:
                 src = ""
                 src1 = '%s.tar.bz2' % src_prefix
@@ -730,24 +831,49 @@
                 if subprocess.call(cmd, shell=True) != 0:
                     raise Exception('Failed to run %s' % cmd)
                 self.clean_nfs_dir = True
-                self.nfs_dir = dest
+                self.rootfs = dest
 
         # Start the userspace NFS server
-        cmd = 'runqemu-export-rootfs start %s' % self.nfs_dir
+        cmd = 'runqemu-export-rootfs start %s' % self.rootfs
         logger.info('Running %s...' % cmd)
         if subprocess.call(cmd, shell=True) != 0:
             raise Exception('Failed to run %s' % cmd)
 
         self.nfs_running = True
 
-
     def setup_slirp(self):
         """Setup user networking"""
 
         if self.fstype == 'nfs':
             self.setup_nfs()
         self.kernel_cmdline_script += ' ip=dhcp'
-        self.set('NETWORK_CMD', self.get('QB_SLIRP_OPT'))
+        # Port mapping
+        hostfwd = ",hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
+        qb_slirp_opt_default = "-netdev user,id=net0%s" % hostfwd
+        qb_slirp_opt = self.get('QB_SLIRP_OPT') or qb_slirp_opt_default
+        # Figure out the port
+        ports = re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)
+        ports = [int(i) for i in ports]
+        mac = 2
+        # Find a free port to avoid conflicts
+        for p in ports[:]:
+            p_new = p
+            while not check_free_port('localhost', p_new):
+                p_new += 1
+                mac += 1
+                while p_new in ports:
+                    p_new += 1
+                    mac += 1
+            if p != p_new:
+                ports.append(p_new)
+                qb_slirp_opt = re.sub(':%s-' % p, ':%s-' % p_new, qb_slirp_opt)
+                logger.info("Port forward changed: %s -> %s" % (p, p_new))
+        mac = "%s%02x" % (self.mac_slirp, mac)
+        self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qb_slirp_opt))
+        # Print out the port forwards
+        hostfwd = re.findall('(hostfwd=[^,]*)', qb_slirp_opt)
+        if hostfwd:
+            logger.info('Port forward: %s' % ' '.join(hostfwd))
 
     def setup_tap(self):
         """Setup tap"""
@@ -799,7 +925,7 @@
             gid = os.getgid()
             uid = os.getuid()
             logger.info("Setting up tap interface under sudo")
-            cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.get('STAGING_DIR_NATIVE'))
+            cmd = 'sudo %s %s %s %s' % (self.qemuifup, uid, gid, self.bindir_native)
             tap = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8').rstrip('\n')
             lockfile = os.path.join(lockdir, tap)
             self.lock = lockfile + '.lock'
@@ -816,27 +942,35 @@
         client = gateway + 1
         if self.fstype == 'nfs':
             self.setup_nfs()
-        self.kernel_cmdline_script += " ip=192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
-        mac = "52:54:00:12:34:%02x" % client
+        netconf = "192.168.7.%s::192.168.7.%s:255.255.255.0" % (client, gateway)
+        logger.info("Network configuration: %s", netconf)
+        self.kernel_cmdline_script += " ip=%s" % netconf
+        mac = "%s%02x" % (self.mac_tap, client)
         qb_tap_opt = self.get('QB_TAP_OPT')
         if qb_tap_opt:
-            qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap).replace('@MAC@', mac)
+            qemu_tap_opt = qb_tap_opt.replace('@TAP@', tap)
         else:
-            qemu_tap_opt = "-device virtio-net-pci,netdev=net0,mac=%s -netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (mac, self.tap)
+            qemu_tap_opt = "-netdev tap,id=net0,ifname=%s,script=no,downscript=no" % (self.tap)
 
         if self.vhost_enabled:
             qemu_tap_opt += ',vhost=on'
 
-        self.set('NETWORK_CMD', qemu_tap_opt)
+        self.set('NETWORK_CMD', '%s %s' % (self.network_device.replace('@MAC@', mac), qemu_tap_opt))
 
     def setup_network(self):
+        if self.get('QB_NET') == 'none':
+            return
         cmd = "stty -g"
         self.saved_stty = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')
+        self.network_device = self.get('QB_NETWORK_DEVICE') or self.network_device
         if self.slirp_enabled:
             self.setup_slirp()
         else:
             self.setup_tap()
 
+    def setup_rootfs(self):
+        if self.get('QB_ROOTFS') == 'none':
+            return
         rootfs_format = self.fstype if self.fstype in ('vmdk', 'qcow2', 'vdi') else 'raw'
 
         qb_rootfs_opt = self.get('QB_ROOTFS_OPT')
@@ -849,31 +983,40 @@
             self.kernel_cmdline = 'root=/dev/ram0 rw debugshell'
             self.rootfs_options = '-initrd %s' % self.rootfs
         else:
+            vm_drive = ''
             if self.fstype in self.vmtypes:
                 if self.fstype == 'iso':
                     vm_drive = '-cdrom %s' % self.rootfs
-                else:
-                    cmd1 = "grep -q 'root=/dev/sd' %s" % self.rootfs
-                    cmd2 = "grep -q 'root=/dev/hd' %s" % self.rootfs
-                    if subprocess.call(cmd1, shell=True) == 0:
+                elif self.get('QB_DRIVE_TYPE'):
+                    drive_type = self.get('QB_DRIVE_TYPE')
+                    if drive_type.startswith("/dev/sd"):
                         logger.info('Using scsi drive')
                         vm_drive = '-drive if=none,id=hd,file=%s,format=%s -device virtio-scsi-pci,id=scsi -device scsi-hd,drive=hd' \
                                        % (self.rootfs, rootfs_format)
-                    elif subprocess.call(cmd2, shell=True) == 0:
+                    elif drive_type.startswith("/dev/hd"):
                         logger.info('Using ide drive')
                         vm_drive = "%s,format=%s" % (self.rootfs, rootfs_format)
                     else:
-                        logger.warn("Can't detect drive type %s" % self.rootfs)
-                        logger.warn('Trying to use virtio block drive')
+                        # virtio might have been selected explicitly (just use it), or
+                        # is used as fallback (then warn about that).
+                        if not drive_type.startswith("/dev/vd"):
+                            logger.warn("Unknown QB_DRIVE_TYPE: %s" % drive_type)
+                            logger.warn("Failed to figure out drive type, consider defining or fixing QB_DRIVE_TYPE")
+                            logger.warn('Trying to use virtio block drive')
                         vm_drive = '-drive if=virtio,file=%s,format=%s' % (self.rootfs, rootfs_format)
+
+                # All branches above set vm_drive.
                 self.rootfs_options = '%s -no-reboot' % vm_drive
             self.kernel_cmdline = 'root=%s rw highres=off' % (self.get('QB_KERNEL_ROOT'))
 
         if self.fstype == 'nfs':
             self.rootfs_options = ''
-            k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, self.nfs_dir, self.unfs_opts)
+            k_root = '/dev/nfs nfsroot=%s:%s,%s' % (self.nfs_server, self.rootfs, self.unfs_opts)
             self.kernel_cmdline = 'root=%s rw highres=off' % k_root
 
+        if self.fstype == 'none':
+            self.rootfs_options = ''
+
         self.set('ROOTFS_OPTIONS', self.rootfs_options)
 
     def guess_qb_system(self):
@@ -921,13 +1064,38 @@
         if not qemu_system:
             raise Exception("Failed to boot, QB_SYSTEM_NAME is NULL!")
 
-        qemu_bin = '%s/%s' % (self.get('STAGING_BINDIR_NATIVE'), qemu_system)
+        qemu_bin = '%s/%s' % (self.bindir_native, qemu_system)
+
+        # It is possible to have qemu-native in ASSUME_PROVIDED; in that case
+        # QEMU won't be found in the sysroot, so fall back to the host's qemu.
+        if not os.path.exists(qemu_bin):
+            logger.info("QEMU binary not found in %s, trying host's QEMU" % qemu_bin)
+            for path in (os.environ['PATH'] or '').split(':'):
+                qemu_bin_tmp = os.path.join(path, qemu_system)
+                logger.info("Trying: %s" % qemu_bin_tmp)
+                if os.path.exists(qemu_bin_tmp):
+                    qemu_bin = qemu_bin_tmp
+                    if not os.path.isabs(qemu_bin):
+                        qemu_bin = os.path.abspath(qemu_bin)
+                    logger.info("Using host's QEMU: %s" % qemu_bin)
+                    break
+
         if not os.access(qemu_bin, os.X_OK):
             raise OEPathError("No QEMU binary '%s' could be found" % qemu_bin)
 
         check_libgl(qemu_bin)
 
-        self.qemu_opt = "%s %s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.qemu_opt_script, self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+        self.qemu_opt = "%s %s %s %s" % (qemu_bin, self.get('NETWORK_CMD'), self.get('ROOTFS_OPTIONS'), self.get('QB_OPT_APPEND'))
+
+        for ovmf in self.ovmf_bios:
+            format = ovmf.rsplit('.', 1)[-1]
+            self.qemu_opt += ' -drive if=pflash,format=%s,file=%s' % (format, ovmf)
+        if self.ovmf_bios:
+            # OVMF only supports normal VGA, i.e. we need to override a -vga vmware
+            # that gets added for example for normal qemux86.
+            self.qemu_opt += ' -vga std'
+
+        self.qemu_opt += ' ' + self.qemu_opt_script
 
         if self.snapshot:
             self.qemu_opt += " -snapshot"
@@ -953,6 +1121,17 @@
             elif serial_num == 1:
                 self.qemu_opt += " %s" % self.get("QB_SERIAL_OPT")
 
+        # We always want ttyS0 and ttyS1 in qemu machines (see SERIAL_CONSOLES).
+        # If no serial or serialtcp option was specified, only ttyS0 is created
+        # and sysvinit shows an error trying to enable ttyS1:
+        #     INIT: Id "S1" respawning too fast: disabled for 5 minutes
+        serial_num = len(re.findall("-serial", self.qemu_opt))
+        if serial_num == 0:
+            if re.search("-nographic", self.qemu_opt):
+                self.qemu_opt += " -serial mon:stdio -serial null"
+            else:
+                self.qemu_opt += " -serial mon:vc -serial null"
+
     def start_qemu(self):
         if self.kernel:
             kernel_opts = "-kernel %s -append '%s %s %s %s'" % (self.kernel, self.kernel_cmdline,
@@ -969,7 +1148,7 @@
 
     def cleanup(self):
         if self.cleantap:
-            cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.get('STAGING_DIR_NATIVE'))
+            cmd = 'sudo %s %s %s' % (self.qemuifdown, self.tap, self.bindir_native)
             logger.info('Running %s' % cmd)
             subprocess.call(cmd, shell=True)
         if self.lock_descriptor:
@@ -978,7 +1157,7 @@
 
         if self.nfs_running:
             logger.info("Shutting down the userspace NFS server...")
-            cmd = "runqemu-export-rootfs stop %s" % self.nfs_dir
+            cmd = "runqemu-export-rootfs stop %s" % self.rootfs
             logger.info('Running %s' % cmd)
             subprocess.call(cmd, shell=True)
 
@@ -987,9 +1166,9 @@
             subprocess.call(cmd, shell=True)
 
         if self.clean_nfs_dir:
-            logger.info('Removing %s' % self.nfs_dir)
-            shutil.rmtree(self.nfs_dir)
-            shutil.rmtree('%s.pseudo_state' % self.nfs_dir)
+            logger.info('Removing %s' % self.rootfs)
+            shutil.rmtree(self.rootfs)
+            shutil.rmtree('%s.pseudo_state' % self.rootfs)
 
     def load_bitbake_env(self, mach=None):
         if self.bitbake_e:
@@ -1014,8 +1193,30 @@
             self.bitbake_e = ''
             logger.warn("Couldn't run 'bitbake -e' to gather environment information:\n%s" % err.output.decode('utf-8'))
 
+    @property
+    def bindir_native(self):
+        result = self.get('STAGING_BINDIR_NATIVE')
+        if result and os.path.exists(result):
+            return result
+
+        cmd = 'bitbake qemu-helper-native -e'
+        logger.info('Running %s...' % cmd)
+        out = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+        out = out.stdout.read().decode('utf-8')
+
+        match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', out, re.M)
+        if match:
+            result = match.group(1)
+            if os.path.exists(result):
+                self.set('STAGING_BINDIR_NATIVE', result)
+                return result
+            raise Exception("Native sysroot directory %s doesn't exist" % result)
+        else:
+            raise Exception("Can't find STAGING_BINDIR_NATIVE in '%s' output" % cmd)
+
+
 def main():
-    if len(sys.argv) == 1 or "help" in sys.argv:
+    if "help" in sys.argv or '-h' in sys.argv or '--help' in sys.argv:
         print_usage()
         return 0
     config = BaseConfig()
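The new bindir_native property above falls back to scraping `bitbake qemu-helper-native -e` when STAGING_BINDIR_NATIVE is unset; the extraction itself is one multiline regex over the captured output, shown here against a canned string (the path is an example):

import re

# Canned stand-in for 'bitbake qemu-helper-native -e' output.
bitbake_e = ('FOO="bar"\n'
             'STAGING_BINDIR_NATIVE="/build/tmp/sysroots-components'
             '/x86_64/qemu-helper-native/usr/bin"\n')

match = re.search('^STAGING_BINDIR_NATIVE="(.*)"', bitbake_e, re.M)
print(match.group(1) if match else 'not found')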
@@ -1030,6 +1231,7 @@
     config.print_config()
     try:
         config.setup_network()
+        config.setup_rootfs()
         config.setup_final()
         config.start_qemu()
     finally:
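Tying the slirp changes above together: each `hostfwd=tcp::H-:G` host port that is already taken gets bumped until a free, unclaimed port is found, and the option string is rewritten in place. A condensed model of that loop, with a busy set standing in for check_free_port() (the option string is an example value):

import re

qb_slirp_opt = "-netdev user,id=net0,hostfwd=tcp::2222-:22,hostfwd=tcp::2323-:23"
busy = {2222}                           # pretend another qemu holds 2222

ports = [int(p) for p in re.findall('hostfwd=[^-]*:([0-9]+)-[^,-]*', qb_slirp_opt)]
for p in ports[:]:
    p_new = p
    while p_new in busy or (p_new != p and p_new in ports):
        p_new += 1                      # skip busy and already-claimed ports
    if p_new != p:
        ports.append(p_new)
        qb_slirp_opt = re.sub(':%s-' % p, ':%s-' % p_new, qb_slirp_opt)
        print("Port forward changed: %s -> %s" % (p, p_new))
print(qb_slirp_opt)                     # 2222 has become 2223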
diff --git a/import-layers/yocto-poky/scripts/runqemu-export-rootfs b/import-layers/yocto-poky/scripts/runqemu-export-rootfs
index 7ebc071..c7992d8 100755
--- a/import-layers/yocto-poky/scripts/runqemu-export-rootfs
+++ b/import-layers/yocto-poky/scripts/runqemu-export-rootfs
@@ -44,7 +44,7 @@
 	echo "Did you forget to source your build environment setup script?"
 	exit 1
 fi
-. $SYSROOT_SETUP_SCRIPT
+. $SYSROOT_SETUP_SCRIPT meta-ide-support
 
 if [ ! -e "$OECORE_NATIVE_SYSROOT/usr/bin/unfsd" ]; then
 	echo "Error: Unable to find unfsd binary in $OECORE_NATIVE_SYSROOT/usr/bin/"
diff --git a/import-layers/yocto-poky/scripts/runqemu-extract-sdk b/import-layers/yocto-poky/scripts/runqemu-extract-sdk
index 32ddd48..2a0dd50 100755
--- a/import-layers/yocto-poky/scripts/runqemu-extract-sdk
+++ b/import-layers/yocto-poky/scripts/runqemu-extract-sdk
@@ -35,7 +35,7 @@
     echo "Did you forget to source your build system environment setup script?"
     exit 1
 fi
-. $SYSROOT_SETUP_SCRIPT
+. $SYSROOT_SETUP_SCRIPT meta-ide-support
 PSEUDO_OPTS="-P $OECORE_NATIVE_SYSROOT/usr"
 
 ROOTFS_TARBALL=$1
diff --git a/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs b/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs
index bfb60f4..11de318 100755
--- a/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs
+++ b/import-layers/yocto-poky/scripts/runqemu-gen-tapdevs
@@ -23,11 +23,13 @@
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 
 usage() {
-	echo "Usage: sudo $0 <uid> <gid> <num> <native-sysroot-basedir>"
+	echo "Usage: sudo $0 <uid> <gid> <num> <staging_bindir_native>"
         echo "Where <uid> is the numeric user id the tap devices will be owned by"
 	echo "Where <gid> is the numeric group id the tap devices will be owned by"
 	echo "<num> is the number of tap devices to create (0 to remove all)"
 	echo "<native-sysroot-basedir> is the path to the build system's native sysroot"
+	echo "e.g. $ bitbake qemu-helper-native"
+	echo "$ sudo $0 1000 1000 4 tmp/sysroots-components/x86_64/qemu-helper-native/usr/bin"
 	exit 1
 }
 
@@ -44,9 +46,9 @@
 TUID=$1
 GID=$2
 COUNT=$3
-SYSROOT=$4
+STAGING_BINDIR_NATIVE=$4
 
-TUNCTL=$SYSROOT/usr/bin/tunctl
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
 if [[ ! -x "$TUNCTL" || -d "$TUNCTL" ]]; then
 	echo "Error: $TUNCTL is not an executable"
 	usage
@@ -85,7 +87,7 @@
 	echo "Creating $COUNT tap devices for UID: $TUID GID: $GID..."
 	for ((index=0; index < $COUNT; index++)); do
 		echo "Creating tap$index"
-		ifup=`$RUNQEMU_IFUP $TUID $GID $SYSROOT 2>&1`
+		ifup=`$RUNQEMU_IFUP $TUID $GID $STAGING_BINDIR_NATIVE 2>&1`
 		if [ $? -ne 0 ]; then
 			echo "Error running tunctl: $ifup"
 			exit 1
diff --git a/import-layers/yocto-poky/scripts/runqemu-ifdown b/import-layers/yocto-poky/scripts/runqemu-ifdown
index 8f66cfa..ffbc9de 100755
--- a/import-layers/yocto-poky/scripts/runqemu-ifdown
+++ b/import-layers/yocto-poky/scripts/runqemu-ifdown
@@ -41,11 +41,11 @@
 fi
 
 TAP=$1
-NATIVE_SYSROOT_DIR=$2
+STAGING_BINDIR_NATIVE=$2
 
-TUNCTL=$NATIVE_SYSROOT_DIR/usr/bin/tunctl
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
 if [ ! -e "$TUNCTL" ]; then
-	echo "Error: Unable to find tunctl binary in '$NATIVE_SYSROOT_DIR/usr/bin', please bitbake qemu-helper-native"
+	echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
 	exit 1
 fi
 
diff --git a/import-layers/yocto-poky/scripts/runqemu-ifup b/import-layers/yocto-poky/scripts/runqemu-ifup
index d9bd894..59a15ea 100755
--- a/import-layers/yocto-poky/scripts/runqemu-ifup
+++ b/import-layers/yocto-poky/scripts/runqemu-ifup
@@ -49,11 +49,11 @@
 
 USERID="-u $1"
 GROUP="-g $2"
-NATIVE_SYSROOT_DIR=$3
+STAGING_BINDIR_NATIVE=$3
 
-TUNCTL=$NATIVE_SYSROOT_DIR/usr/bin/tunctl
+TUNCTL=$STAGING_BINDIR_NATIVE/tunctl
 if [ ! -x "$TUNCTL" ]; then
-       echo "Error: Unable to find tunctl binary in '$NATIVE_SYSROOT_DIR/usr/bin', please bitbake qemu-helper-native"
+       echo "Error: Unable to find tunctl binary in '$STAGING_BINDIR_NATIVE', please bitbake qemu-helper-native"
 	exit 1
 fi
 
diff --git a/import-layers/yocto-poky/scripts/sysroot-relativelinks.py b/import-layers/yocto-poky/scripts/sysroot-relativelinks.py
index e44eba2..ffe2547 100755
--- a/import-layers/yocto-poky/scripts/sysroot-relativelinks.py
+++ b/import-layers/yocto-poky/scripts/sysroot-relativelinks.py
@@ -24,7 +24,7 @@
     os.symlink(os.path.relpath(topdir+link, subdir), filep)
 
 for subdir, dirs, files in os.walk(topdir):
-    for f in files:
+    for f in dirs + files:
         filep = os.path.join(subdir, f)
         if os.path.islink(filep):
             #print("Considering %s" % filep)
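With the one-line change above the script now also rewrites directory symlinks, not just file symlinks; the rewrite itself is os.path.relpath anchored at the link's own directory, which keeps the sysroot self-contained wherever it is moved. A standalone illustration with invented paths:

import os

topdir = '/sdk/sysroot'                          # example sysroot root
link_path = '/sdk/sysroot/usr/lib/libfoo.so'     # hypothetical symlink
target = '/lib/libfoo.so.1'                      # absolute target within it

rel = os.path.relpath(topdir + target, os.path.dirname(link_path))
print(rel)   # ../../lib/libfoo.so.1, valid wherever the sysroot lives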
diff --git a/import-layers/yocto-poky/scripts/task-time b/import-layers/yocto-poky/scripts/task-time
new file mode 100755
index 0000000..e58040a
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/task-time
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import re
+import sys
+
+arg_parser = argparse.ArgumentParser(
+    description="""
+Reports time consumed for one or more tasks in a format similar to the standard
+Bash 'time' builtin. Optionally sorts tasks by real (wall-clock), user (user
+space CPU), or sys (kernel CPU) time.
+""")
+
+arg_parser.add_argument(
+    "paths",
+    metavar="path",
+    nargs="+",
+    help="""
+A path containing task buildstats. If the path is a directory, e.g.
+build/tmp/buildstats, then all tasks found (recursively) in it will be
+processed. If the path is a single task buildstat, e.g.
+build/tmp/buildstats/20161018083535/foo-1.0-r0/do_compile, then just that
+buildstat will be processed. Multiple paths can be specified to process all of
+them. Files whose names do not start with "do_" are ignored.
+""")
+
+arg_parser.add_argument(
+    "--sort",
+    choices=("none", "real", "user", "sys"),
+    default="none",
+    help="""
+The measurement to sort the output by. Defaults to 'none', which means to sort
+by the order paths were given on the command line. For other options, tasks are
+sorted in descending order from the highest value.
+""")
+
+args = arg_parser.parse_args()
+
+# Field names and regexes for parsing out their values from buildstat files
+field_regexes = (("elapsed",    ".*Elapsed time: ([0-9.]+)"),
+                 ("user",       "rusage ru_utime: ([0-9.]+)"),
+                 ("sys",        "rusage ru_stime: ([0-9.]+)"),
+                 ("child user", "Child rusage ru_utime: ([0-9.]+)"),
+                 ("child sys",  "Child rusage ru_stime: ([0-9.]+)"))
+
+# A list of (<path>, <dict>) tuples, where <path> is the path of a do_* task
+# buildstat file and <dict> maps fields from the file to their values
+task_infos = []
+
+def save_times_for_task(path):
+    """Saves information for the buildstat file 'path' in 'task_infos'."""
+
+    if not os.path.basename(path).startswith("do_"):
+        return
+
+    with open(path) as f:
+        fields = {}
+
+        for line in f:
+            for name, regex in field_regexes:
+                match = re.match(regex, line)
+                if match:
+                    fields[name] = float(match.group(1))
+                    break
+
+        # Check that all expected fields were present
+        for name, regex in field_regexes:
+            if name not in fields:
+                print("Warning: Skipping '{}' because no field matching '{}' could be found"
+                      .format(path, regex),
+                      file=sys.stderr)
+                return
+
+        task_infos.append((path, fields))
+
+def save_times_for_dir(path):
+    """Runs save_times_for_task() for each file in path and its subdirs, recursively."""
+
+    # Raise an exception for os.walk() errors instead of ignoring them
+    def walk_onerror(e):
+        raise e
+
+    for root, _, files in os.walk(path, onerror=walk_onerror):
+        for fname in files:
+            save_times_for_task(os.path.join(root, fname))
+
+for path in args.paths:
+    if os.path.isfile(path):
+        save_times_for_task(path)
+    else:
+        save_times_for_dir(path)
+
+def elapsed_time(task_info):
+    return task_info[1]["elapsed"]
+
+def tot_user_time(task_info):
+    return task_info[1]["user"] + task_info[1]["child user"]
+
+def tot_sys_time(task_info):
+    return task_info[1]["sys"] + task_info[1]["child sys"]
+
+if args.sort != "none":
+    sort_fn = {"real": elapsed_time, "user": tot_user_time, "sys": tot_sys_time}
+    task_infos.sort(key=sort_fn[args.sort], reverse=True)
+
+first_entry = True
+
+# Catching BrokenPipeError avoids annoying errors when the output is piped into
+# e.g. 'less' or 'head' and not completely read
+try:
+    for task_info in task_infos:
+        real = elapsed_time(task_info)
+        user = tot_user_time(task_info)
+        sys = tot_sys_time(task_info)
+
+        if not first_entry:
+            print()
+        first_entry = False
+
+        # Mimic Bash's 'time' builtin
+        print("{}:\n"
+              "real\t{}m{:.3f}s\n"
+              "user\t{}m{:.3f}s\n"
+              "sys\t{}m{:.3f}s"
+              .format(task_info[0],
+                      int(real//60), real%60,
+                      int(user//60), user%60,
+                      int(sys//60), sys%60))
+
+except BrokenPipeError:
+    pass
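To make the parsing loop above concrete, here is a minimal sketch that runs the same field_regexes over an invented buildstat snippet (all numbers made up) and prints one 'time'-style record:

    import re

    field_regexes = (("elapsed",    ".*Elapsed time: ([0-9.]+)"),
                     ("user",       "rusage ru_utime: ([0-9.]+)"),
                     ("sys",        "rusage ru_stime: ([0-9.]+)"),
                     ("child user", "Child rusage ru_utime: ([0-9.]+)"),
                     ("child sys",  "Child rusage ru_stime: ([0-9.]+)"))

    sample = [
        "Elapsed time: 12.34 seconds",
        "rusage ru_utime: 1.50",
        "rusage ru_stime: 0.25",
        "Child rusage ru_utime: 8.00",
        "Child rusage ru_stime: 0.75",
    ]

    fields = {}
    for line in sample:
        for name, regex in field_regexes:
            # re.match() anchors at the start of the line, which is what keeps
            # "rusage ..." from also matching the "Child rusage ..." lines.
            match = re.match(regex, line)
            if match:
                fields[name] = float(match.group(1))
                break

    real = fields["elapsed"]
    user = fields["user"] + fields["child user"]
    sys_time = fields["sys"] + fields["child sys"]
    print("real\t{}m{:.3f}s\nuser\t{}m{:.3f}s\nsys\t{}m{:.3f}s".format(
        int(real // 60), real % 60,
        int(user // 60), user % 60,
        int(sys_time // 60), sys_time % 60))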
diff --git a/import-layers/yocto-poky/scripts/tiny/ksize.py b/import-layers/yocto-poky/scripts/tiny/ksize.py
index b9d2b19..ea1ca7f 100755
--- a/import-layers/yocto-poky/scripts/tiny/ksize.py
+++ b/import-layers/yocto-poky/scripts/tiny/ksize.py
@@ -41,7 +41,7 @@
 class Sizes:
     def __init__(self, glob):
         self.title = glob
-        p = Popen("size -t " + glob, shell=True, stdout=PIPE, stderr=PIPE)
+        p = Popen("size -t " + str(glob), shell=True, stdout=PIPE, stderr=PIPE)
         output = p.communicate()[0].splitlines()
         if len(output) > 2:
             sizes = output[-1].split()[0:4]
@@ -62,18 +62,18 @@
         r = Report(filename, title)
         path = os.path.dirname(filename)
 
-        p = Popen("ls " + path + "/*.o | grep -v built-in.o",
+        p = Popen("ls " + str(path) + "/*.o | grep -v built-in.o",
                   shell=True, stdout=PIPE, stderr=PIPE)
         glob = ' '.join(p.communicate()[0].splitlines())
-        oreport = Report(glob, path + "/*.o")
-        oreport.sizes.title = path + "/*.o"
+        oreport = Report(glob, str(path) + "/*.o")
+        oreport.sizes.title = str(path) + "/*.o"
         r.parts.append(oreport)
 
         if subglob:
             p = Popen("ls " + subglob, shell=True, stdout=PIPE, stderr=PIPE)
             for f in p.communicate()[0].splitlines():
                 path = os.path.dirname(f)
-                r.parts.append(Report.create(f, path, path + "/*/built-in.o"))
+                r.parts.append(Report.create(f, path, str(path) + "/*/built-in.o"))
             r.parts.sort(reverse=True)
 
         for b in r.parts:
@@ -116,6 +116,13 @@
                self.deltas["data"], self.deltas["bss"]))
         print("\n")
 
+    def __lt__(this, that):
+        if that is None:
+            return 1
+        if not isinstance(that, Report):
+            raise TypeError
+        return this.sizes.total < that.sizes.total
+
     def __cmp__(this, that):
         if that is None:
             return 1
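The new __lt__ exists because Python 3 ignores __cmp__, and list.sort(reverse=True) only needs less-than. A more conventional way to port such a class, sketched on a hypothetical Sized stand-in rather than the Report class itself, is functools.total_ordering:

    from functools import total_ordering

    @total_ordering
    class Sized:
        def __init__(self, total):
            self.total = total

        def __eq__(self, other):
            return isinstance(other, Sized) and self.total == other.total

        def __lt__(self, other):
            # Returning NotImplemented lets Python raise TypeError itself
            # for unrelated types, instead of raising it by hand.
            if not isinstance(other, Sized):
                return NotImplemented
            return self.total < other.total

    parts = sorted([Sized(3), Sized(1), Sized(2)], reverse=True)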
diff --git a/import-layers/yocto-poky/scripts/tiny/ksum.py b/import-layers/yocto-poky/scripts/tiny/ksum.py
new file mode 100755
index 0000000..d4f3892
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/tiny/ksum.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# ex:ts=4:sw=4:sts=4:et
+# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
+#
+# Copyright (c) 2016, Intel Corporation.
+# All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# DESCRIPTION 'ksum.py' generates a combined summary of vmlinux and
+# module sizes for a built kernel, as a quick tool for comparing the
+# overall effects of systemic tinification changes.  Execute from the
+# base directory of the kernel build you want to summarize.  Setting
+# the 'verbose' flag will display the sizes for each file included in
+# the summary.
+#
+# AUTHORS
+# Tom Zanussi <tom.zanussi (at] linux.intel.com>
+#
+
+__version__ = "0.1.0"
+
+# Python Standard Library modules
+import os
+import sys
+import getopt
+from subprocess import *
+
+def usage():
+    prog = os.path.basename(sys.argv[0])
+    print('Usage: %s [OPTION]...' % prog)
+    print('  -v,                 display sizes for each file')
+    print('  -h, --help          display this help and exit')
+    print('')
+    print('Run %s from the top-level Linux kernel build directory.' % prog)
+
+verbose = False
+
+n_ko_files = 0
+ko_file_list = []
+
+ko_text = 0
+ko_data = 0
+ko_bss = 0
+ko_total = 0
+
+vmlinux_file = ""
+vmlinux_level = 0
+
+vmlinux_text = 0
+vmlinux_data = 0
+vmlinux_bss = 0
+vmlinux_total = 0
+
+def is_vmlinux_file(filename):
+    global vmlinux_level
+    if filename == ("vmlinux") and vmlinux_level == 0:
+        vmlinux_level += 1
+        return True
+    return False
+
+def is_ko_file(filename):
+    if filename.endswith(".ko"):
+        return True
+    return False
+
+def collect_object_files():
+    print "Collecting object files recursively from %s..." % os.getcwd()
+    for dirpath, dirs, files in os.walk(os.getcwd()):
+        for filename in files:
+            if is_ko_file(filename):
+                ko_file_list.append(os.path.join(dirpath, filename))
+            elif is_vmlinux_file(filename):
+                global vmlinux_file
+                vmlinux_file = os.path.join(dirpath, filename)
+    print "Collecting object files [DONE]"
+
+def add_ko_file(filename):
+        p = Popen("size -t " + filename, shell=True, stdout=PIPE, stderr=PIPE)
+        output = p.communicate()[0].splitlines()
+        if len(output) > 2:
+            sizes = output[-1].split()[0:4]
+            if verbose:
+                print "     %10d %10d %10d %10d\t" % \
+                    (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
+                print "%s" % filename[len(os.getcwd()) + 1:]
+            global n_ko_files, ko_text, ko_data, ko_bss, ko_total
+            ko_text += int(sizes[0])
+            ko_data += int(sizes[1])
+            ko_bss += int(sizes[2])
+            ko_total += int(sizes[3])
+            n_ko_files += 1
+
+def get_vmlinux_totals():
+        p = Popen("size -t " + vmlinux_file, shell=True, stdout=PIPE, stderr=PIPE)
+        output = p.communicate()[0].splitlines()
+        if len(output) > 2:
+            sizes = output[-1].split()[0:4]
+            if verbose:
+                print "     %10d %10d %10d %10d\t" % \
+                    (int(sizes[0]), int(sizes[1]), int(sizes[2]), int(sizes[3])),
+                print "%s" % vmlinux_file[len(os.getcwd()) + 1:]
+            global vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total
+            vmlinux_text += int(sizes[0])
+            vmlinux_data += int(sizes[1])
+            vmlinux_bss += int(sizes[2])
+            vmlinux_total += int(sizes[3])
+
+def sum_ko_files():
+    for ko_file in ko_file_list:
+        add_ko_file(ko_file)
+
+def main():
+    try:
+        opts, args = getopt.getopt(sys.argv[1:], "vh", ["help"])
+    except getopt.GetoptError as err:
+        print('%s' % str(err))
+        usage()
+        sys.exit(2)
+
+    for o, a in opts:
+        if o == '-v':
+            global verbose
+            verbose = True
+        elif o in ('-h', '--help'):
+            usage()
+            sys.exit(0)
+        else:
+            assert False, "unhandled option"
+
+    collect_object_files()
+    sum_ko_files()
+    get_vmlinux_totals()
+
+    print "\nTotals:"
+    print "\nvmlinux:"
+    print "    text\tdata\t\tbss\t\ttotal"
+    print "    %-10d\t%-10d\t%-10d\t%-10d" % \
+        (vmlinux_text, vmlinux_data, vmlinux_bss, vmlinux_total)
+    print "\nmodules (%d):" % n_ko_files
+    print "    text\tdata\t\tbss\t\ttotal"
+    print "    %-10d\t%-10d\t%-10d\t%-10d" % \
+        (ko_text, ko_data, ko_bss, ko_total)
+    print "\nvmlinux + modules:"
+    print "    text\tdata\t\tbss\t\ttotal"
+    print "    %-10d\t%-10d\t%-10d\t%-10d" % \
+        (vmlinux_text + ko_text, vmlinux_data + ko_data, \
+         vmlinux_bss + ko_bss, vmlinux_total + ko_total)
+
+if __name__ == "__main__":
+    try:
+        ret = main()
+    except Exception:
+        ret = 1
+        import traceback
+        traceback.print_exc(5)
+    sys.exit(ret)
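Both ksize.py and ksum.py lean on the same trick: shell out to binutils' size -t and read the trailing TOTALS row. A standalone Python 3 sketch of that parsing; the subprocess call and column order are the only assumptions:

    import subprocess

    def size_totals(paths):
        # 'size -t' prints a header, one row per file, and a TOTALS row;
        # the first four columns of the last row are text, data, bss, dec.
        out = subprocess.check_output(["size", "-t"] + list(paths),
                                      universal_newlines=True).splitlines()
        if len(out) > 2:
            return tuple(int(v) for v in out[-1].split()[:4])
        return None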
diff --git a/import-layers/yocto-poky/scripts/verify-bashisms b/import-layers/yocto-poky/scripts/verify-bashisms
index 0741e18..dab64ef 100755
--- a/import-layers/yocto-poky/scripts/verify-bashisms
+++ b/import-layers/yocto-poky/scripts/verify-bashisms
@@ -6,7 +6,7 @@
     # type is supported by dash
     'if type systemctl >/dev/null 2>/dev/null; then',
     'if type systemd-tmpfiles >/dev/null 2>/dev/null; then',
-    'if type update-rc.d >/dev/null 2>/dev/null; then',
+    'type update-rc.d >/dev/null 2>/dev/null; then',
     'command -v',
     # HOSTNAME is set locally
     'buildhistory_single_commit "$CMDLINE" "$HOSTNAME"',
@@ -22,7 +22,10 @@
             return True
     return False
 
-def process(recipe, function, script):
+SCRIPT_LINENO_RE = re.compile(r' line (\d+) ')
+BASHISM_WARNING = re.compile(r'^(possible bashism in.*)$', re.MULTILINE)
+
+def process(filename, function, lineno, script):
     import tempfile
 
     if not script.startswith("#!"):
@@ -40,18 +43,38 @@
         # TODO check exit code is 1
 
         # Replace the temporary filename with the function and split it
-        output = e.output.replace(fn.name, function).splitlines()
-        if len(results) % 2 != 0:
-            print("Unexpected output from checkbashism: %s" % str(output))
-            return
+        output = e.output.replace(fn.name, function)
+        if not output or not output.startswith('possible bashism'):
+            # Probably starts with or contains only warnings. Dump verbatim
+            # with one space indentation. Can't do the splitting and whitelist
+            # checking below.
+            return '\n'.join([filename,
+                              ' Unexpected output from checkbashisms.pl'] +
+                             [' ' + x for x in output.splitlines()])
 
-        # Turn the output into a list of (message, source) values
+        # We know that the first line matches and that therefore the first
+        # list entry will be empty - skip it.
+        output = BASHISM_WARNING.split(output)[1:]
+        # Turn the output into a single string like this:
+        # /.../foobar.bb
+        #  possible bashism in updatercd_postrm line 2 (type):
+        #   if ${@use_updatercd(d)} && type update-rc.d >/dev/null 2>/dev/null; then
+        #  ...
+        #   ...
         result = []
         # Check the results against the whitelist
         for message, source in zip(output[0::2], output[1::2]):
             if not is_whitelisted(source):
-                result.append((message, source))
-        return result
+                if lineno is not None:
+                    message = SCRIPT_LINENO_RE.sub(lambda m: ' line %d ' % (int(m.group(1)) + int(lineno) - 1),
+                                                   message)
+                result.append(' ' + message.strip())
+                result.extend(['  %s' % x for x in source.splitlines()])
+        if result:
+            result.insert(0, filename)
+            return '\n'.join(result)
+        else:
+            return None
 
 def get_tinfoil():
     scripts_path = os.path.dirname(os.path.realpath(__file__))
@@ -68,9 +91,19 @@
 if __name__=='__main__':
     import shutil
     if shutil.which("checkbashisms.pl") is None:
-        print("Cannot find checkbashisms.pl on $PATH")
+        print("Cannot find checkbashisms.pl on $PATH, get it from https://anonscm.debian.org/cgit/collab-maint/devscripts.git/plain/scripts/checkbashisms.pl")
         sys.exit(1)
 
+    # The order of defining the worker function,
+    # initializing the pool and connecting to the
+    # bitbake server is crucial, don't change it.
+    def func(item):
+        (filename, key, lineno), script = item
+        return process(filename, key, lineno, script)
+
+    import multiprocessing
+    pool = multiprocessing.Pool()
+
     tinfoil = get_tinfoil()
 
     # This is only the default configuration and should iterate over
@@ -83,34 +116,33 @@
     else:
         initial_pns = sorted(pkg_pn)
 
-    pns = []
-    print("Generating file list...")
+    pns = set()
+    scripts = {}
+    print("Generating scripts...")
     for pn in initial_pns:
         for fn in pkg_pn[pn]:
             # There's no point checking multiple BBCLASSEXTENDed variants of the same recipe
+            # (at least in general - there is some risk that the variants contain different scripts)
             realfn, _, _ = bb.cache.virtualfn2realfn(fn)
             if realfn not in pns:
-                pns.append(realfn)
+                pns.add(realfn)
+                data = tinfoil.parse_recipe_file(realfn)
+                for key in data.keys():
+                    if data.getVarFlag(key, "func") and not data.getVarFlag(key, "python"):
+                        script = data.getVar(key, False)
+                        if script:
+                            filename = data.getVarFlag(key, "filename")
+                            lineno = data.getVarFlag(key, "lineno")
+                            # There's no point in checking a function multiple
+                            # times just because different recipes include it.
+                            # We identify unique scripts by file, name, and (just in case)
+                            # line number.
+                            attributes = (filename or realfn, key, lineno)
+                            scripts.setdefault(attributes, script)
 
 
-    def func(fn):
-        result = []
-        data = tinfoil.parse_recipe_file(fn)
-        for key in data.keys():
-            if data.getVarFlag(key, "func", True) and not data.getVarFlag(key, "python", True):
-                script = data.getVar(key, False)
-                if not script: continue
-                #print ("%s:%s" % (fn, key))
-                r = process(fn, key, script)
-                if r: result.extend(r)
-        return fn, result
-
     print("Scanning scripts...\n")
-    import multiprocessing
-    pool = multiprocessing.Pool()
-    for pn,results in pool.imap(func, pns):
-        if results:
-            print(pn)
-            for message,source in results:
-                print(" %s\n  %s" % (message, source))
-            print()
+    for result in pool.imap(func, scripts.items()):
+        if result:
+            print(result)
+    tinfoil.shutdown()
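The rewritten process() relies on a re.split() subtlety: because BASHISM_WARNING contains a capturing group, the split keeps each matched warning line, so after dropping the leading empty chunk the list alternates message, source. A minimal demonstration on invented checkbashisms output:

    import re

    BASHISM_WARNING = re.compile(r'^(possible bashism in.*)$', re.MULTILINE)

    sample = ("possible bashism in do_install line 3 (type):\n"
              "if type foo >/dev/null; then\n"
              "possible bashism in do_install line 7 ($RANDOM):\n"
              "echo $RANDOM\n")

    # split() yields ['', msg1, src1, msg2, src2, ...]; skip the empty chunk.
    chunks = BASHISM_WARNING.split(sample)[1:]
    for message, source in zip(chunks[0::2], chunks[1::2]):
        print(" " + message.strip())
        print("  " + source.strip())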
diff --git a/import-layers/yocto-poky/scripts/wic b/import-layers/yocto-poky/scripts/wic
index fe2c33f..a5f2dbf 100755
--- a/import-layers/yocto-poky/scripts/wic
+++ b/import-layers/yocto-poky/scripts/wic
@@ -41,6 +41,8 @@
 scripts_path = os.path.abspath(os.path.dirname(__file__))
 lib_path = scripts_path + '/lib'
 sys.path.insert(0, lib_path)
+oe_lib_path = os.path.join(os.path.dirname(scripts_path), 'meta', 'lib')
+sys.path.insert(0, oe_lib_path)
 
 bitbake_exe = spawn.find_executable('bitbake')
 if bitbake_exe:
@@ -51,11 +53,28 @@
 else:
     bitbake_main = None
 
-from wic.utils.oe.misc import get_bitbake_var, BB_VARS
-from wic.utils.errors import WicError
+from wic import WicError
+from wic.utils.misc import get_bitbake_var, BB_VARS
 from wic import engine
 from wic import help as hlp
 
+
+def wic_logger():
+    """Create and configure wic logger."""
+    logger = logging.getLogger('wic')
+    logger.setLevel(logging.INFO)
+
+    handler = logging.StreamHandler()
+
+    formatter = logging.Formatter('%(levelname)s: %(message)s')
+    handler.setFormatter(formatter)
+
+    logger.addHandler(handler)
+
+    return logger
+
+logger = wic_logger()
+
 def rootfs_dir_to_args(krootfs_dir):
     """
     Get a rootfs_dir dict and serialize to string
@@ -88,7 +107,7 @@
     """
     parser = optparse.OptionParser(usage=usage_str)
 
-    parser.add_option("-o", "--outdir", dest="outdir",
+    parser.add_option("-o", "--outdir", dest="outdir", default='.',
                       help="name of directory to create image in")
     parser.add_option("-e", "--image-name", dest="image_name",
                       help="name of the image to use the artifacts from "
@@ -107,7 +126,7 @@
     parser.add_option("-n", "--native-sysroot", dest="native_sysroot",
                       help="path to the native sysroot containing the tools "
                            "to use to build the image")
-    parser.add_option("-p", "--skip-build-check", dest="build_check",
+    parser.add_option("-s", "--skip-build-check", dest="build_check",
                       action="store_false", default=True, help="skip the build check")
     parser.add_option("-f", "--build-rootfs", action="store_true", help="build rootfs")
     parser.add_option("-c", "--compress-with", choices=("gzip", "bzip2", "xz"),
@@ -123,13 +142,11 @@
     (options, args) = parser.parse_args(args)
 
     if len(args) != 1:
-        logging.error("Wrong number of arguments, exiting\n")
         parser.print_help()
-        sys.exit(1)
+        raise WicError("Wrong number of arguments, exiting")
 
     if options.build_rootfs and not bitbake_main:
-        logging.error("Can't build roofs as bitbake is not in the $PATH")
-        sys.exit(1)
+        raise WicError("Can't build rootfs as bitbake is not in the $PATH")
 
     if not options.image_name:
         missed = []
@@ -140,9 +157,8 @@
             if not val:
                 missed.append(opt)
         if missed:
-            print("The following build artifacts are not specified:")
-            print("  " + ", ".join(missed))
-            sys.exit(1)
+            raise WicError("The following build artifacts are not specified: %s" %
+                           ", ".join(missed))
 
     if options.image_name:
         BB_VARS.default_image = options.image_name
@@ -152,15 +168,11 @@
     if options.vars_dir:
         BB_VARS.vars_dir = options.vars_dir
 
-    if options.build_check:
-        print("Checking basic build environment...")
-        if not engine.verify_build_env():
-            print("Couldn't verify build environment, exiting\n")
-            sys.exit(1)
-        else:
-            print("Done.\n")
+    if options.build_check and not engine.verify_build_env():
+        raise WicError("Couldn't verify build environment, exiting")
 
-    bootimg_dir = ""
+    if options.debug:
+        logger.setLevel(logging.DEBUG)
 
     if options.image_name:
         if options.build_rootfs:
@@ -168,33 +180,40 @@
             if options.debug:
                 argv.append("--debug")
 
-            print("Building rootfs...\n")
+            logger.info("Building rootfs...\n")
             if bitbake_main(BitBakeConfigParameters(argv),
                             cookerdata.CookerConfiguration()):
-                sys.exit(1)
+                raise WicError("bitbake exited with error")
 
         rootfs_dir = get_bitbake_var("IMAGE_ROOTFS", options.image_name)
         kernel_dir = get_bitbake_var("DEPLOY_DIR_IMAGE", options.image_name)
-        native_sysroot = get_bitbake_var("STAGING_DIR_NATIVE",
-                                         options.image_name)
+        bootimg_dir = get_bitbake_var("STAGING_DATADIR", options.image_name)
+        native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE",
+                                         options.image_name) #, cache=False)
     else:
         if options.build_rootfs:
-            print("Image name is not specified, exiting. (Use -e/--image-name to specify it)\n")
-            sys.exit(1)
+            raise WicError("Image name is not specified, exiting. "
+                           "(Use -e/--image-name to specify it)")
+        native_sysroot = options.native_sysroot
+
+    if not native_sysroot or not os.path.isdir(native_sysroot):
+        logger.info("Building wic-tools...\n")
+        if bitbake_main(BitBakeConfigParameters("bitbake wic-tools".split()),
+                        cookerdata.CookerConfiguration()):
+            raise WicError("bitbake wic-tools failed")
+        native_sysroot = get_bitbake_var("RECIPE_SYSROOT_NATIVE", "wic-tools")
+        if not native_sysroot:
+            raise WicError("Unable to find the location of the native "
+                           "tools sysroot to use")
 
     wks_file = args[0]
 
     if not wks_file.endswith(".wks"):
         wks_file = engine.find_canned_image(scripts_path, wks_file)
         if not wks_file:
-            print("No image named %s found, exiting.  (Use 'wic list images' "\
-                  "to list available images, or specify a fully-qualified OE "\
-                  "kickstart (.wks) filename)\n" % args[0])
-            sys.exit(1)
-
-    image_output_dir = ""
-    if options.outdir:
-        image_output_dir = options.outdir
+            raise WicError("No image named %s found, exiting.  (Use 'wic list images' "
+                           "to list available images, or specify a fully-qualified OE "
+                           "kickstart (.wks) filename)" % args[0])
 
     if not options.image_name:
         rootfs_dir = ''
@@ -204,17 +223,13 @@
         kernel_dir = options.kernel_dir
         native_sysroot = options.native_sysroot
         if rootfs_dir and not os.path.isdir(rootfs_dir):
-            print("--roofs-dir (-r) not found, exiting\n")
-            sys.exit(1)
+            raise WicError("--rootfs-dir (-r) not found, exiting")
         if not os.path.isdir(bootimg_dir):
-            print("--bootimg-dir (-b) not found, exiting\n")
-            sys.exit(1)
+            raise WicError("--bootimg-dir (-b) not found, exiting")
         if not os.path.isdir(kernel_dir):
-            print("--kernel-dir (-k) not found, exiting\n")
-            sys.exit(1)
+            raise WicError("--kernel-dir (-k) not found, exiting")
         if not os.path.isdir(native_sysroot):
-            print("--native-sysroot (-n) not found, exiting\n")
-            sys.exit(1)
+            raise WicError("--native-sysroot (-n) not found, exiting")
     else:
         not_found = not_found_dir = ""
         if not os.path.isdir(rootfs_dir):
@@ -226,13 +241,11 @@
         if not_found:
             if not not_found_dir:
                 not_found_dir = "Completely missing artifact - wrong image (.wks) used?"
-            print("Build artifacts not found, exiting.")
-            print("  (Please check that the build artifacts for the machine")
-            print("   selected in local.conf actually exist and that they")
-            print("   are the correct artifacts for the image (.wks file)).\n")
-            print("The artifact that couldn't be found was %s:\n  %s" % \
-                (not_found, not_found_dir))
-            sys.exit(1)
+            logger.info("Build artifacts not found, exiting.")
+            logger.info("  (Please check that the build artifacts for the machine")
+            logger.info("   selected in local.conf actually exist and that they")
+            logger.info("   are the correct artifacts for the image (.wks file)).\n")
+            raise WicError("The artifact that couldn't be found was %s:\n  %s" % (not_found, not_found_dir))
 
     krootfs_dir = options.rootfs_dir
     if krootfs_dir is None:
@@ -241,10 +254,9 @@
 
     rootfs_dir = rootfs_dir_to_args(krootfs_dir)
 
-    print("Creating image(s)...\n")
+    logger.info("Creating image(s)...\n")
     engine.wic_create(wks_file, rootfs_dir, bootimg_dir, kernel_dir,
-                      native_sysroot, scripts_path, image_output_dir,
-                      options.compressor, options.bmap, options.debug)
+                      native_sysroot, options)
 
 
 def wic_list_subcommand(args, usage_str):
@@ -256,9 +268,8 @@
     args = parser.parse_args(args)[1]
 
     if not engine.wic_list(args, scripts_path):
-        logging.error("Bad list arguments, exiting\n")
         parser.print_help()
-        sys.exit(1)
+        raise WicError("Bad list arguments, exiting")
 
 
 def wic_help_topic_subcommand(args, usage_str):
@@ -293,10 +304,6 @@
 }
 
 
-def start_logging(loglevel):
-    logging.basicConfig(filename='wic.log', filemode='w', level=loglevel)
-
-
 def main(argv):
     parser = optparse.OptionParser(version="wic version %s" % __version__,
                                    usage=hlp.wic_usage)
@@ -309,7 +316,7 @@
         if args[0] == "help":
             if len(args) == 1:
                 parser.print_help()
-                sys.exit(1)
+                raise WicError("help command requires parameter")
 
     return hlp.invoke_subcommand(args, parser, hlp.wic_help_usage, subcommands)
 
@@ -318,6 +325,6 @@
     try:
         sys.exit(main(sys.argv[1:]))
     except WicError as err:
-        print("ERROR:", err, file=sys.stderr)
+        print()
+        logger.error(err)
         sys.exit(1)
-
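The wic rework above converts error handling from scattered print-and-sys.exit(1) calls into raising WicError and handling it once at the entry point. A skeleton of that pattern, reduced to its essentials:

    import logging
    import sys

    logging.basicConfig(format='%(levelname)s: %(message)s')
    logger = logging.getLogger('wic')

    class WicError(Exception):
        pass

    def main(argv):
        if len(argv) != 1:
            # Subcommands raise instead of exiting, so cleanup and the
            # final error report happen in exactly one place.
            raise WicError("Wrong number of arguments, exiting")
        return 0

    if __name__ == "__main__":
        try:
            sys.exit(main(sys.argv[1:]))
        except WicError as err:
            logger.error(err)
            sys.exit(1)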
diff --git a/import-layers/yocto-poky/scripts/wipe-sysroot b/import-layers/yocto-poky/scripts/wipe-sysroot
deleted file mode 100755
index 5e6b1a4..0000000
--- a/import-layers/yocto-poky/scripts/wipe-sysroot
+++ /dev/null
@@ -1,54 +0,0 @@
-#! /bin/sh
-
-# Wipe out all of the sysroots and all of the stamps that populated it.
-# Author: Ross Burton <ross.burton@intel.com>
-#
-# Copyright (c) 2012 Intel Corporation
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-# See the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-set -e
-
-if [ $# -gt 0 ]; then
-    echo "Wipe all sysroots and sysroot-related stamps for the current build directory." >&2
-    echo "Usage: $0" >&2
-    exit 1
-fi
-
-ENVS=`mktemp --suffix -wipe-sysroot-envs`
-bitbake -p -e > $ENVS
-
-eval `grep -F SSTATE_MANIFESTS= $ENVS`
-eval `grep -F STAGING_DIR= $ENVS`
-eval `grep -F STAMPS_DIR= $ENVS`
-rm -f $ENVS
-
-if [ -z "$SSTATE_MANIFESTS" -o -z "$STAGING_DIR" -o -z "$STAMPS_DIR" ]; then
-    echo "Could not determine SSTATE_MANIFESTS/STAGING_DIR/STAMPS_DIR from bitbake, check above for errors"
-    exit 1
-fi
-
-echo "Deleting the sysroots in $STAGING_DIR, and selected stamps in $SSTATE_MANIFESTS and $STAMPS_DIR."
-
-# The sysroots themselves
-rm -rf $STAGING_DIR ${STAGING_DIR}-uninative
-
-# The stamps that said the sysroot was populated
-rm -rf $STAMPS_DIR/*/*/*.do_populate_sysroot.*
-rm -rf $STAMPS_DIR/*/*/*.do_populate_sysroot_setscene.*
-rm -rf $STAMPS_DIR/*/*/*.do_packagedata.*
-rm -rf $STAMPS_DIR/*/*/*.do_packagedata_setscene.*
-
-# The sstate manifests
-rm -rf $SSTATE_MANIFESTS/manifest-*.populate_sysroot
diff --git a/import-layers/yocto-poky/scripts/yocto-compat-layer-wrapper b/import-layers/yocto-poky/scripts/yocto-compat-layer-wrapper
new file mode 100755
index 0000000..db4b687
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/yocto-compat-layer-wrapper
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+# Yocto Project compatibility layer tool wrapper
+#
+# Creates a temporary build directory in which to run the Yocto Project
+# Compatible script, so it does not contaminate the current environment.
+#
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+if [ -z "$BUILDDIR" ]; then
+	echo "Please source oe-init-build-env before running this script."
+	exit 2
+fi
+
+base_dir=$(realpath $BUILDDIR/../)
+cd $base_dir
+
+build_dir=$(mktemp -p $base_dir -d -t build-XXXX)
+
+source oe-init-build-env $build_dir
+yocto-compat-layer.py "$@"
+retcode=$?
+
+rm -rf $build_dir
+
+exit $retcode
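The wrapper's scratch-directory pattern (create, use, always delete) translates directly to Python; a rough analogue, with the argv parameter standing in for the sourced-environment steps that only make sense in shell:

    import shutil
    import subprocess
    import tempfile

    def run_in_scratch_build_dir(base_dir, argv):
        # Give the command a throwaway build directory so it cannot
        # contaminate (or be contaminated by) an existing one.
        build_dir = tempfile.mkdtemp(prefix="build-", dir=base_dir)
        try:
            return subprocess.call(argv, cwd=build_dir)
        finally:
            shutil.rmtree(build_dir)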
diff --git a/import-layers/yocto-poky/scripts/yocto-compat-layer.py b/import-layers/yocto-poky/scripts/yocto-compat-layer.py
new file mode 100755
index 0000000..ba64b4d
--- /dev/null
+++ b/import-layers/yocto-poky/scripts/yocto-compat-layer.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python3
+
+# Yocto Project compatibility layer tool
+#
+# Copyright (C) 2017 Intel Corporation
+# Released under the MIT license (see COPYING.MIT)
+
+import os
+import sys
+import argparse
+import logging
+import time
+import signal
+import shutil
+import collections
+
+scripts_path = os.path.dirname(os.path.realpath(__file__))
+lib_path = scripts_path + '/lib'
+sys.path = sys.path + [lib_path]
+import scriptutils
+import scriptpath
+scriptpath.add_oe_lib_path()
+scriptpath.add_bitbake_lib_path()
+
+from compatlayer import LayerType, detect_layers, add_layer, add_layer_dependencies, get_signatures
+from oeqa.utils.commands import get_bb_vars
+
+PROGNAME = 'yocto-compat-layer'
+CASES_PATHS = [os.path.join(os.path.abspath(os.path.dirname(__file__)),
+                'lib', 'compatlayer', 'cases')]
+logger = scriptutils.logger_create(PROGNAME, stream=sys.stdout)
+
+def test_layer_compatibility(td, layer, test_software_layer_signatures):
+    from compatlayer.context import CompatLayerTestContext
+    logger.info("Starting to analyze: %s" % layer['name'])
+    logger.info("----------------------------------------------------------------------")
+
+    tc = CompatLayerTestContext(td=td, logger=logger, layer=layer, test_software_layer_signatures=test_software_layer_signatures)
+    tc.loadTests(CASES_PATHS)
+    return tc.runTests()
+
+def main():
+    parser = argparse.ArgumentParser(
+            description="Yocto Project compatibility layer tool",
+            add_help=False)
+    parser.add_argument('layers', metavar='LAYER_DIR', nargs='+',
+            help='Layer to test compatibility with Yocto Project')
+    parser.add_argument('-o', '--output-log',
+            help='File to output log (optional)', action='store')
+    parser.add_argument('--dependency', nargs="+",
+            help='Layers to process for dependencies', action='store')
+    parser.add_argument('--machines', nargs="+",
+            help='List of MACHINEs to be used during testing', action='store')
+    parser.add_argument('--additional-layers', nargs="+",
+            help='List of additional layers to add during testing', action='store')
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument('--with-software-layer-signature-check', action='store_true', dest='test_software_layer_signatures',
+                       default=True,
+                       help='check that software layers do not change signatures (on by default)')
+    group.add_argument('--without-software-layer-signature-check', action='store_false', dest='test_software_layer_signatures',
+                       help='disable signature checking for software layers')
+    parser.add_argument('-n', '--no-auto', help='Disable auto layer discovery',
+            action='store_true')
+    parser.add_argument('-d', '--debug', help='Enable debug output',
+            action='store_true')
+    parser.add_argument('-q', '--quiet', help='Print only errors',
+            action='store_true')
+
+    parser.add_argument('-h', '--help', action='help',
+            default=argparse.SUPPRESS,
+            help='show this help message and exit')
+
+    args = parser.parse_args()
+
+    if args.output_log:
+        fh = logging.FileHandler(args.output_log)
+        fh.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
+        logger.addHandler(fh)
+    if args.debug:
+        logger.setLevel(logging.DEBUG)
+    elif args.quiet:
+        logger.setLevel(logging.ERROR)
+
+    if not 'BUILDDIR' in os.environ:
+        logger.error("You must source the environment before running this script.")
+        logger.error("$ source oe-init-build-env")
+        return 1
+    builddir = os.environ['BUILDDIR']
+    bblayersconf = os.path.join(builddir, 'conf', 'bblayers.conf')
+
+    layers = detect_layers(args.layers, args.no_auto)
+    if not layers:
+        logger.error("Failed to detect layers")
+        return 1
+    if args.additional_layers:
+        additional_layers = detect_layers(args.additional_layers, args.no_auto)
+    else:
+        additional_layers = []
+    if args.dependency:
+        dep_layers = detect_layers(args.dependency, args.no_auto)
+        dep_layers = dep_layers + layers
+    else:
+        dep_layers = layers
+
+    logger.info("Detected layers:")
+    for layer in layers:
+        if layer['type'] == LayerType.ERROR_BSP_DISTRO:
+            logger.error("%s: Can't be DISTRO and BSP type at the same time."\
+                     " The conf/distro and conf/machine folders were found."\
+                     % layer['name'])
+            layers.remove(layer)
+        elif layer['type'] == LayerType.ERROR_NO_LAYER_CONF:
+            logger.error("%s: Missing conf/layer.conf file."\
+                     % layer['name'])
+            layers.remove(layer)
+        else:
+            logger.info("%s: %s, %s" % (layer['name'], layer['type'],
+                layer['path']))
+    if not layers:
+        return 1
+
+    shutil.copyfile(bblayersconf, bblayersconf + '.backup')
+    def cleanup_bblayers(signum, frame):
+        shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+        os.unlink(bblayersconf + '.backup')
+    signal.signal(signal.SIGTERM, cleanup_bblayers)
+    signal.signal(signal.SIGINT, cleanup_bblayers)
+
+    td = {}
+    results = collections.OrderedDict()
+    results_status = collections.OrderedDict()
+
+    layers_tested = 0
+    for layer in layers:
+        if layer['type'] == LayerType.ERROR_NO_LAYER_CONF or \
+                layer['type'] == LayerType.ERROR_BSP_DISTRO:
+            continue
+
+        logger.info('')
+        logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'],
+            layer['path']))
+
+        shutil.copyfile(bblayersconf + '.backup', bblayersconf)
+
+        missing_dependencies = not add_layer_dependencies(bblayersconf, layer, dep_layers, logger)
+        if not missing_dependencies:
+            for additional_layer in additional_layers:
+                if not add_layer_dependencies(bblayersconf, additional_layer, dep_layers, logger):
+                    missing_dependencies = True
+                    break
+        if missing_dependencies:
+            logger.info('Skipping %s due to missing dependencies.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Missing dependencies)'
+            layers_tested = layers_tested + 1
+            continue
+
+        if any(map(lambda additional_layer: not add_layer(bblayersconf, additional_layer, dep_layers, logger),
+                   additional_layers)):
+            logger.info('Skipping %s due to missing additional layers.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Missing additional layers)'
+            layers_tested = layers_tested + 1
+            continue
+
+        logger.info('Getting initial bitbake variables ...')
+        td['bbvars'] = get_bb_vars()
+        logger.info('Getting initial signatures ...')
+        td['builddir'] = builddir
+        td['sigs'], td['tunetasks'] = get_signatures(td['builddir'])
+        td['machines'] = args.machines
+
+        if not add_layer(bblayersconf, layer, dep_layers, logger):
+            logger.info('Skipping %s as the layer could not be added.' % layer['name'])
+            results[layer['name']] = None
+            results_status[layer['name']] = 'SKIPPED (Unknown)'
+            layers_tested = layers_tested + 1
+            continue
+
+        result = test_layer_compatibility(td, layer, args.test_software_layer_signatures)
+        results[layer['name']] = result
+        results_status[layer['name']] = 'PASS' if results[layer['name']].wasSuccessful() else 'FAIL'
+        layers_tested = layers_tested + 1
+
+    if layers_tested:
+        logger.info('')
+        logger.info('Summary of results:')
+        logger.info('')
+        for layer_name in results_status:
+            logger.info('%s ... %s' % (layer_name, results_status[layer_name]))
+
+    cleanup_bblayers(None, None)
+
+    return 0
+
+if __name__ == '__main__':
+    try:
+        ret =  main()
+    except Exception:
+        ret = 1
+        import traceback
+        traceback.print_exc()
+    sys.exit(ret)
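The bblayers.conf handling above is a reusable pattern: snapshot a config file, register signal handlers that restore it, and restore once more on the normal exit path. Distilled into a sketch (guard_config and its argument are illustrative names, not part of the commit):

    import os
    import shutil
    import signal

    def guard_config(path):
        # Snapshot the file and restore it on SIGINT/SIGTERM; the caller
        # invokes the returned function once more for the normal exit path.
        backup = path + '.backup'
        shutil.copyfile(path, backup)

        def restore(signum=None, frame=None):
            shutil.copyfile(backup, path)
            os.unlink(backup)

        signal.signal(signal.SIGTERM, restore)
        signal.signal(signal.SIGINT, restore)
        return restore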