Squashed 'yocto-poky/' changes from b1f23d1..8358e54

Upgrade subtree to Yocto-2.1.

6c1c013 build-appliance-image: Update to krogoth head revision
5f84d65 syslinux.bbclass: Remove APPEND from variable dependency
d9dd864 bitbake: toaster-tests: tests for build dashboard
1cf8f21 bitbake: toaster: add modal to select custom image for editing
a40a3e6 bitbake: toaster: add build dashboard buttons to edit/create custom images
e65c980 bitbake: toaster-tests: make helper click on input before entering text
484cbf8 bitbake: toaster-tests: add tests for new custom image page
437b728 bitbake: toaster: prevent exception when Project.release is null
cfc22d3 bitbake: toaster: only prevent duplicate custom image names within a project
3036413 bitbake: toaster: disable/enable "Add layer" button according to input's content
040dbf6 bitbake: toaster: fix sorting after hiding a column in build tables
1b11b79 bitbake: toaster: ensure ToasterTable headings are reset when order by changes
9855840 image.bbclass: The wrong name is being used for the debug filesystem
38c7e2d image_types: Ensure rootfs dependencies cover DEBUGFS
0c3eaa7 syslinux.bbclass: The AUTO_SYSLINUXMENU value needs to be boolean
9c8a049 perf: pass DESTDIR in EXTRA_OEMAKE
9de7324 buildtools-tarball: set INHIBIT_DEFAULT_DEPS
ef09105 xf86-video-omapfb: remove EXTRA_OECONF_armv7a
c2f7da2 base.bbclass: Introduce PACKAGECONFIG_CONFARGS variable
e1c6890 git: update to 2.7.4
98bf7de license.bbclass: do write_deploy_manifest in image postprocessing
519600c devtool: sdk-update: fix handling of UNINATIVE_CHECKSUM changes
c7980b6 bitbake: main: fix processing of BBEVENTLOG
ee25d0e toasterconf.json: Update for krogoth release
b8e5de2 toasterconf.json: Remove fido from supported configurations
c59771e toasterconf.json: Update for krogoth release
d0bce0b toasterconf.json: Remove fido from supported configurations
d25eea3 poky-tiny.conf: set PREFERRED_VERSION_linux-yocto-tiny to 4.4
9f970b6 dev-manual, profile-manual, ref-manual: Purging Oprofile stuff
1d93104 ref-manual: Added description for the testsdk.bbclass.
db47094 ref-manual: Updated the remove-libtool.bbclass description.
a16eeca ref-manual: Added gobject-introspection.bbclass description.
3e761b4 ref-manual: Added reference for npm.bbclass.
5e50157 ref-manual: Fixed typo in the nopackages.bbclass description
f7b68c7 ref-manual: Added description for bash-completion.bbclass
ece900a ref-manual: Added nopackages.bbclass description. Fixed stray typo.
9143e9e ref-manual: Added description for the INSTALL_TIMEZONE_FILE variable.
6391dbf ref-manual: Updated the PREFERRED_PROVIDER variable with a note.
6d86f7a ref-manual: Dropped references to the autotools_stage class
4d5ff5e ref-manual, dev-manual: Scrubbed boot-directdisk and bootimg classes
cd2aaaa ref-manual: Updated the uninative.bbclass description.
e975d26 documentation: Converted "meta-yocto" to "meta-poky"
84452ee bsp-guide: Updated yocto-bsp create example output.
e00a62c ref-manual: Added the migration section for 2.1
02db9e6 yocto-project-qs, ref-manual: Upgraded minimum Git requirement
989841f ref-manual: Added rootfs-postcommands class description.
d06b343 ref-manual: Updated the EXTRA_OEMAKE variable description.
ecb2eb6 dev-manual: Updated "Additional Implementation Details" section
004b939 bitbake: lib/bb/utils: add docstring for contains()
524d04c ca-certificates: support Toybox
ecaf12e oetest: make console output more verbose
4946ecf dhcp: CVE-2016-2774
c219c6d buildtools-tarball: fix perl being included when building with ipk
9fe7738 buildtools-tarball.bb: fix unexpected operator
ed07f43 lib/oeqa/selftest/base.py: Correct a reference to meta/lib/oeqa/selftest
8953d83 oe-selftest: Correct the usage examples
dee47ad devtool: sdk-update: reset git metadata on update
396e64d build-appliance-image: Load TUN at startup
55068b1 default-providers.inc: set openssl PREFERRED_PROVIDER to openssl
74ab080 bind: CVE-2016-2088
d488d78 rpm: Disable __sync_add_and_fetch_8 on nios2
9d2d1ae kernel: fitimage: Fix do_deploy taskhash mismatch
4693593 images: zero out the rootfs_extra_space in initramfs images
8beb671 ext-sdk-prepare.py: exclude do_rm_work from unexpected output; create unit test
0262bc5 bitbake: bitbake-user-manual: Updated the 'bitbake -h' output example.
890ccd3 bitbake: bitbake-user-manual: Updated "Conditional Metadata" section
20a0121 bitbake: bitbake-user-manual: Updated discussion about using "inherit"
9f374c4 bitbake: providers: Add PREFERRED_RPROVIDER support
4b8b110 bitbake: providers: We don't depend on previous build results
8e7282c bitbake: cooker/knotty: Prefix parse logs with filename being parsed
1131303 bitbake: cooker: pass exception to finishAsyncCommand
ffa2ca0 fs-perms.txt: fix ROOT_HOME's permission
fd66a38 Revert "fs-perms.txt: fix ROOT_HOME's permission"
9ec9557 buildstats: Fix tracebacks for early task failures
7f9d01e default-providers: Update to use PREFERRED_RPROVIDER
76f4bbc oeqa/selftest/sstatetests: fix no-op sstate test
6326812 buildhistory: don't alter SDK creation stamps
bb40b5e dhcp: Enable update-rc.d service
27e202f meta/classes/qemu.bbclass: set -cpu of ppce5500/ppce6500 to e500mc
7c5823a shadow: Disable syslog for more commands
60a8719 devtool: upgrade: handle recipes where source is not first entry in SRC_URI
8353557 devtool: update-recipe: handle where SRC_URI is appended to with +=
aab3c8d linux-yocto: make aufs4 optional
d75d2be linux-yocto: tiny and pin ctrl config updates
8547cbf linux-yocto/4.4: BXT enablement
ffad386 linux-yocto/4.1: mainline SPI backports
4ba33a3 linux-yocto/4.4: gpio-pca953x: fix the "drive" property cannot read/write
86571db devtool: don't copy .git when building the eSDK
83eac65 package.bbclass: improve permission handling
eeae2ac fs-perms.txt: fix ROOT_HOME's permission
1db3dc8 runqemu: let ramfs be equal to cpio.gz
a8c8e81 gcc-common.inc: String format tweak for available tunes
a7c426a pbzip2: fix LIC_FILES_CHKSUM following 1.1.12 -> 1.1.13 upgrade
1229009 pbzip2: don't skip do_configure
1e4ee30 useradd_base.bbclass: remove flock option '-w'
cb45ef3 matchbox-keyboard: Hide desktop launcher
69e20ca npm.bbclass: Stop packagenames containing underscores from being generated
c3c55478 bind: CVE-2016-1285 CVE-2016-1286
c4387a8 image.bbclass: add DEB_{PRE, POST}PROCESS_COMMANDS to rootfs_command_variables list
967bc74 rootfs.py: apply ROOTFS_POSTINSTALL_COMMAND to all package formats
f7352ca wic: fix bug in handling fsoptions
b2f5de5 buildtools-tarball.bb: set TOOLCHAIN_NEED_CONFIGSITE_CACHE to null
a460b04 rpm: more verbose errors in rpmTempFile
a43991d rootfs-postcommands: handle broken links when writing manifest
2c81e17 socat: Use c_ispeed and c_ospeed based upon libc
5c8124d archiver: Improve debug output
e912c46 kbd: remove uclibc-stdarg.patch
965fd3c image.bbclass: use max() instead of indexing booleans
6d85874 linux-yocto-tiny: fix KBRANCH
440d949 sudo: fix pam config on systemd systems
3fd5a6d sysvinit: make lastb.1 an alternative
175263e lib/oe/lsb: sanitise the distro identifier
9262d2f package.bbclass: handle links in sorted order
29cf263 sanity: allow sftp and ssh mirrors
f503317 toaster.bbclass: improve package information collection
88f4178 rsync: remove upstream's rebuild logic
8d59d06 rsync: pass cached configure values through the right variable
384e41c rsync: don't install acinclude.m4
e80800e Revert "oeqa/selftest/wic: add test case for sparse images"
45c0763 Revert "wic/utils/partitionedfs.py: assemble .wic images as sparse files"
e0e5426 bitbake: runqueue: Improve 'multiple .bb files are due to be built' message
380004b archiver: Ensure sstate-inputdir directory is created
3ad70a5 linux-yocto-tiny: fix COMPATIBLE_MACHINE
0e59727 glib-2.0: Put glib-compile-schemas back in -utils
d27ca36 oeqa/runexported.py: Fix exported test
85dbd7b oeqa/selftest/sstatetests: split 32/64 build host from no-op action tests
57be6dd util-linux: take ownership of hwclock if installed
acc1f96 meta: remove redundant ac_cv_sizeof_off_t assignments
92759d8 meta/site: remove sizeof_off_t
5602f64 archiver: Fix ASSUME_PROVIDED issues
fab626c distrodata: Exclude DATETIME reference from sstate checksum
faaeaf9 build-appliance-image: Support for VirtualBox guest additions
778121a local.conf.sample: Make it possible to override EXTRA_IMAGE_FEATURES
f947c27 poky.conf: add Fedora 23 to supported distros
f33a110 maintainers.inc: remove adt-installer
83d4fab local.conf.sample: remove reference to adt
52cfdb6 bitbake: toaster: fixes for customimage package not found
dae4ffb bitbake: data_smart: Restrict expansion regexp to not include : characters
7e739ac bitbake: tests/utils.py: test origvalue in a callback matches what is expected
e1e459e bitbake: lib/bb/utils.py: Fix a bug in edit_metadata() that could corrupt vars
43150ab oeqa/selftest/wic: add test case for sparse images
29bc2f7 wic/utils/partitionedfs.py: assemble .wic images as sparse files
7fdb061 image-vm.bbclass/image_types.bbclass: IMAGE_NAME -> IMAGE_LINK_NAME
04e1978 image_types.bbclass: fix elf
513ea49 image_types.bbclass: set nodesize for btrfs
bad434b libxml2: fix AM_PATH_XML2
9fe3d01 useradd_base.bbclass: prevent variable expansion in $opts
fb8e5f9 extrausers.bbclass: drop retry count for perform_user/group* calls
f737af4 build-perf-test: add eSDK installed size to metrics
50f5ca3 rpm: brace expansion is a bashism
66ecbd3 openssl.inc: minor packaging cleanup
e38ec0c systemd-systemctl-native: fix unit detection
4019058 apr-util: fix path in rules.mk for nativesdk
bdf453f bdwgc: installed-vs-shipped for nativesdk
12ca8df libsolv: fix installed-vs-shipped for nativesdk
c88c894 desktop-file-utils-native: disable emacs
d4f6c0e toaster: add DL_DIR and SSTATE_DIR to oe toasterconf
69b3f87 toaster.bbclass: strip task from the target
aa45c75 x11-common: Add PACKAGECONFIG for screen blanking
d366a33 opkg-utils: re-do find/ls code to not fail on filenames with spaces
5e360ca image-live.bbclass: fix iso + efi only
f5adb23 Add missing runtime dependency to python-pygobject
0720425 devtool: Create unlocked-sigs.inc containing items in the workspace
64cca7e sstatesig.py: Add a method to "unlock" recipes
1cb99dd populate_sdk_ext.bbclass: Enable locked sigs errors
2431ed7 sstatesig.py: Improve the SIGGEN_LOCKEDSIGS_TASKSIG_CHECK message
7e90280 sstatesig.py: Split single locked sigs check into multiple checks
7ce800c toasterconf.json: Set default distro to nodistro
1b7b548 dev-manual: Updated poky-floating-revisions file snippet example.
8d9e233 dev-manual: set correct task name for do_kernel_configme
6971029 poky-floating-revisions: Fix typo
14e2b90 toasterconf.json: Add DL_DIR and SSTATE_DIR to poky toasterconf
296dfbc build-appliance-image: Update to master head revision
00c4c9b poky: Convention is 2.1, not 2.1.0
8cd1dec build-appliance-image: Update to master head revision
ecd58bb poky.conf: Bump version for 2.1.0 krogoth release
e955b5d bitbake: Update version to 1.30.0
4fd14e3 build-appliance-image: Update to master head revision
133224f documentation: Fixed references using the DISTRO_NAME variable
3831ca0 documentation: Updated release date in manual history tables.
b590fab dev-manual, ref-manual, sdk-manual: Removing oprofile references.
d2084cc Makefile: Removed adt-manual support
2677098 mega-manual: Removed the adt title .PNG file.
d9b4c80 README: Updated to remove the ADT manual and add the SDK manual.
9796cbb mega-manual.sed: Removed adt-manual processing
aa4b72b yocto-project-qs: Updated the minnowboard example.
f2505af poky.ent: Added lower-case distro name variable.
ee42a9b kernel-dev: Applied review comments to "Adding Recipe-Space Kernel Features"
d57fe7c ref-manual: Updated the PREFERRED_VERSION variable description.
53bade8 dev-manual: Added new section describing hardware and non-hardware config
763ae4e ref-manual: Updated verbiage on proxy handling
a1295ed ref-manual: Updated PREFERRED_VERSION variable description
879eec2 ref-manual: Updated debugging tips and tricks
23dbf81 kernel-dev: Added new "Adding Recipe-Space Kernel Features" section.
f30bfe9 kernel-dev: Updated the "Kernel Metadata Location" section.
53729bc sdk-manual: Removed three sections of writer notes.
9f0c571 sdk-manual: Applied review edits.
d4bdafa sdk-manual: Added sections in Appendix B.
d94fa00 dev-manual, profile-manual: Removed oprofile section and link
4f3dfa8 bitbake: bitbake: update LICENSE file with QUnit details
013984d bitbake: tests: browser Add test to run the js unit tests
7609888 bitbake: toaster: views jsunittest Add MACHINE and an extra layer to test project
fbc2c5d bitbake: toaster: tests Set MACHINE for the test projects
cb6b4eb bitbake: toaster: Add qunit to project so that it can be used offline
18cb7fe bitbake: toaster: add rev dep column to image detail pages
7a309d9 bitbake: buildinfohelper: work around unicode exceptions
860cba8 bitbake: toasterui: update build in internal state
acb9407 bitbake: buildinfohelper: fix KeyError
52c8740 bitbake: toaster: get bitbake location from BBBASEDIR
f5d3ef6 bitbake: toaster: export BBBASEDIR variable
71ff9b9 bitbake: toaster: update projectconf.html for DL_DIR and SSTATE_DIR
705d44f bitbake: toaster: update view to support DL_DIR and SSTATE_DIR
4aafcae bitbake: toaster: use empty token
5ce4665 bitbake: toaster: runbuilds Clean up runbuilds
55b6fab bitbake: toaster: runbuilds Make runbuilds aware of the build CANCELLED state
f4cee88 bitbake: toaster: models Exclude the CANCELLED builds from get_number_of_builds
296d373 bitbake: toaster: mrb_section template Add build cancel button
f1b49dc bitbake: toaster: tables BuildsTable exclude cancelled builds
22242ae bitbake: buildinfohelper: Add handler for cancelling a build
9dcb9cb bitbake: toaster: bldcontrol models Add a cancelling state to the BuildRequest
dfa8510 bitbake: toaster: models Add cancelled state to build outcome
5f862bb bitbake: toaster: update BuildEnvironmentController and BitbakeController
0db62c5 bitbake: toaster: libtoaster Update implementation of startABuild and cancelABuild
afab95c bitbake: toaster: xhr Update the implementation of the build cancellation request
eead032 bitbake: toaster: Move xhr calls for starting and stopping builds
f5aa970 bitbake: toaster: bldcontrol Add forceShutDown function to BitbakeController
d6992a8 bitbake: toasterui: shutdown on BuildCompleted event
c4ae028 bitbake: toaster: use bash explicitly
4adddfd bitbake: toaster: fix jethro build
b1a919a bitbake: toaster: update conf/local.conf
590a815 bitbake: toaster: stop bitbake server after the build
a8f6001 bitbake: toaster: add new parameter to _shellcmd
a43a16b bitbake: toaster: reimplement triggerBuild
ab18c20 bitbake: toaster: modified setLayers API
22fba9b bitbake: toaster: add brbe parameter to triggerBuild
829a0bd bitbake: toaster: remove release API
7068e8a bitbake: toaster: remove startBBServer API
9d4c62d bitbake: toasterui: fix brbe reporting
5bcce68 bitbake: buildinfohelper: improve handling of providermap
61b6b98 bitbake: uievent: improve BBUIEventQueue code
0b0d754 bitbake: toasterui: add brbe parameter to buildinfohelper
94ac3f0 bitbake: toaster: set BITBAKE_UI environment variable
e23a23b bitbake: toaster: get rid of noui option
f77baec bitbake: toaster: don't start bitbake server
4127fef image_types: use compress framework to produce checksums for images
60786b8 runqemu-gen-tapdevs: Add note about NetworkManager & tap devices
634aeed libtool: fix contaminated path to lt_truncate_bin
298d875 create-pull-request: fix for newer git
4faeff9 wget: fix build when len(TMPDIR) == 410
b667f4d sanity.bbclass: fix a hardcode in check_path_length()
94b3583 grub: remove unused 0001-Fix-build-with-glibc-2.20.patch
ef163ab glibc: remove unused CVE patches
b050ab2 clutter-gst-3.0: remove unused enable-tests.patch
064ebd5 cmake: remove unused dont-run-cross-binaries.patch
a71db4c tcl: remove unused fix-configure.patch
476eeea rpm: remove two unused patches
3d56864 ffmpeg, gstreamer1.0-libav: add textrel INSANE_SKIPs
8cc10a9 ffmpeg: Make configure options explicit
45c1944 bzip2: set correct soname
cbe33ec useradd.bbclass: remove user/group created by the package in clean* task
c115740 bitbake: fetch2/git.py: remove .indirectiondir workaround
4f07c22 bitbake: persist_data: Return str instead of unicode for sqlite3 text queries
d8f1f42 scripts/oe-selftest: avoid the creation of coverage file when coverage not installed
6e5e225 scripts/oe-selftest: remove coverage file if any coverage option is given
5edfec4 scripts/oe-selftest: remove unneeded coverage warning
8109e93 patch.bbclass: remove useless path assignment
7963613 gstreamer: remove now-redundant expansion in do_split_packages
37f4f5b package: do_split_packages: expand variables in extra_depends
2ed2089 xf86-video-intel: Add patch to fix some poor image quality
c1436b3 sanity: Increase minimum git version to 1.8.3.1
672545b scripts/oe-buildenv-internal: Fix regression in BB_ENV_EXTRAWHITE setting
f7fed7c license.bbclass: fix warnings when run in unprivileged "container" env
43071a0 externalsrc: avoid race in temporary git index file
f4f1d20 scripts/lib/bsp/help.py: Typo in help for yocto-bsp create
1bd2c8e bdwgc: use github repo for source location
0e6743b xf86-video-intel: Add patch to allow UXA to build
21e31c2 package_manager.py: better error handling in opkg's package listing
f2d5e20 systemd: make systemd-serialgetty optional
e699404 ncurses: reorder PACKAGES
f94ad4d bluez5.inc: remove obsolete workaround
a0cd8c0 buildtools-tarball: Add texinfo (for makeinfo)
9877795 cogl: fix G-I .typelib installation
b13184c classes/buildhistory: fix grammar in comments
e5c0a9f classes/buildhistory: fix filtering of depends-nokernel.dot
4d364f2 classes/buildhistory: optimise getting package size list
af5f423 bitbake: siggen: Ensure tainted stamps are accounted for with writing custom stamps
47e9e12 bitbake: siggen: Fix nostamp taint handling
8033627 bitbake: siggen: Add checksum recalculation/checking code
3e1b5e0 bitbake: siggen: Fix check calculation problem with file_checksums
39b637c bitbake: siggen: Drop misleading duplicate method
2c722e2 bitbake: tests/fetch.py: Improve unit tests for trusted network check
cf6d12d bitbake: fetch2: BB_ALLOWED_NETWORKS should not care about port numbers
158575c bitbake: toaster: orm better detect requires during CustomImageRecipe generation
c634473 bitbake: toaster: Correct typo on build form help text
c9ad1e6 bitbake: toaster: buildinfohelper Add additional metadata to the built layer
072a0b3 poky: Exclude DATE from DISTRO/SDK_VERSION checksums
f3c029f build-appliance-image: Exclude DDATETIME from task signature
7833eb4 image-vm: Exclude DISK_SIGNATURE_GENERATED from task signature
85ff4ff populate_sdk_ext: Exclude BBTASKDEPDATA from task signature
66412ab opkg-utils: make opkg-build exit when it fails to list files.
6b8f8a4 kernel-yocto: enforce SRC_URI specified branch
6ebd43c linux-yocto/4.4: UVC: Add support for R200 depth camera
6d2299f linux-yocto/4.4: fix PAT for 32bit x86
5559301 Revert "linux-yocto: Work around PAT issue on qemux86"
686c74f linux-yocto-dev: bump to v4.6-rcX
b3ba813 linux-yocto/4.1: ahci: backport AHCI runtime PM
8f7bbea linux-yocto/4.4: gpio-pca953x: add PCAL9535 interrupt support
4a50c05 linux-yocto/4.1: telemetry and dmaengine backports
31a10cb wic/isoimage-isohybrid.py: change cpio generated uid&gid to root
5cabf3b wic/isoimage-isohybrid.py: use glob to find initramfs location
5c60c36 bluez5: add ptest support
fc8b24d oe/patch: print cleaner error message when patch fails to apply
bf14014 oe/patch: more detailed error reporting
a2bf9e3 insane.bbclass: avoid false positives on library location
1f2f43c grub-efi.bbclass: use GRUB_ROOT rather than APPEND for root device
bf58526 bitbake.conf: Add BB_WORKERCONTEXT to HASHBASE_WHITELIST
1c1e851 gdb-cross-canadian: use PACKAGECONFIG for python and readline
370a50a base: Fixup PACKAGECONFIG incorrect mappings
dea3423 classes/packagegroup: Refactor code to be simpler
5defbcd default-distrovars.inc: remove libassuan from LGPLv2_WHITELIST_GPL-3.0
58d8123 libassuan: use package specific licensing
1f2a01b init-install-efi.sh: remove all root=foo from grub.cfg
3ce7d8c init-install.sh: fix disk_size
46eed0a ltp: fix test_proc_kill hanging
207ee90 ltp: add periodic output for memcg stress test
feafad1 epiphany: Depend on intltool-native for configure
2510239 image: Fix debugfs image type recursion loop
7dcb4c4 bitbake: toaster: tests Migrate landing page tests to Selenium
5b848fa bitbake: toaster: tests Migrate all projects page tests to Selenium
f2a38ea bitbake: toaster: tests Migrate project builds page tests to Selenium
961cd90 bitbake: toaster: tests Migrate all builds page and project page tests to Selenium
f859a3d bitbake: toaster: tests Migrate to Selenium for UI tests
965c72c yocto-bsp: Set correct default branches and branches base for i386, qemu and x86_64 archs
d110eba selftest/signing: Use packagedata to obtain PR value for signing test
34f11b5 lib/oe/packagedata: Add import os
0012b90 base.bbclass: avoid duplicate call to d.getVar('LICENSE', True)
efe73cb base.bbclass: drop obsolete HOSTTOOLS_WHITELIST_GPL-3.0
5293b83 man: use BUILD_CC and target include files for configure
5121705 scripts, lib: Don't limit traceback lengths to arbitrary values
3168134 bitbake: bitbake: Don't limit traceback lengths to arbitrary values
88ea0b9 image-vm.bbclass: remove invalid code
4d1df2c image-live.bbclass/image-vm.bbclass: remove duplicated code
d6d7526 bootimg.bbclass: merge it into image-live.bbclass
723fa56 boot-directdisk.bbclass: merge it into image-vm.bbclass
9e588481 man: fix several annoying compile/build warnings
aa13b97 image.bbclass: Make unneeded packages for a read-only rootfs configurable
4dde12f relocate_sdk: additional error checks
22bd875 systemd: fix build with gcrypt PACKAGECONFIG disabled
4b77909 devtool: modify: call shutdown on tinfoil when done
43da712 toolchain-shar-extract.sh: ensure all_proxy is allowed through
2aec71e oe-publish-sdk: exclude sstate-cache if publishing minimal SDK
8ef7016 oe-publish-sdk: prevent specifying a directory for the SDK argument
591b97c classes/populate_sdk_ext: support setting vars from environment at build time
c37d542 scripts, lib: Don't limit traceback lengths to arbitrary values
8049f25 python-numpy: Add definition of off_t size
b75505e image-live.bbclass: DEPENDS on syslinux
3ece012 ldconfig-native: Fix ELF flags on 64-bit binaries
d492aec recipes-support/rng-tools: Change runlevel start from S to 2, 3, 4, 5.
ab5c62e oeqa/runtime/parselogs.py: Add systemd unit circular dependencies errors.
9be3fb2 systemd-serialgetty: allow baud rate overriding
cf6788c psmisc: Remove including sys/user.h and __WORDSIZE
ede11b6 selftest: Added testcase decorator to tests
ccfe48c linux-yocto: add overlayfs feature
6ae0224 linux-yocto/4.4: broxton and usb type-c backports
e1ae3ee linux-yocto/4.4: drm/i915/skl: Fix DMC load on Skylake J0 and K0
0a1d621 linux-yocto/4.1: Intel Broxton: pwm backports
6ce8802 linux-yocto/4.1: Apollo Lake/Broxton mmc backports
a256628 linux-yocto/4.1: i2c: designware: Backport i2c patches
fbd209d linux-yocto/4.1: device property backports
ccf1b33 linux-yocto/qemuarm64: enable 32 bit compatibility
dacf9f2 linux-yocto/4.1: SMBus/iTCO backports
ab6fd48 default-distrovars.inc: remove gnutls + libtasn1 from LGPLv2_WHITELIST_GPL-3.0
2123a7e sanity.bbclass: Use pythonexception to raise real exceptions without backtraces
6af88d8 sanity: Require bitbake 1.29.1
1b2df6e uninative: Switch md5sum -> sha256
f719386 bitbake: cookerdata.py: remove slash in the end
e26087f bitbake: Bump version to 1.29.1
d73da22 bitbake: build/utils: Allow python functions to execute with real exception handling
672c07d bitbake: fetch2: Ensure that incorrect checksumed files are always renamed
2554be4 bitbake: cooker: fix CookerParser.shutdown()
53b5dc0 gcc: Fix musl ldso name for mips64
dd31bca selftest/buildoptions.py: use INHERIT +=
71db079 archiver.bbclass: addtask do_deploy_archives_setscene
1ca71e5 bitbake: cooker: Ensure bbappend order is deterministic
292c3e8 bitbake: checksum: In FileChecksumCache don't follow directory symlinks
326fc29 gcc-5.3/gcc-4.9: -fdebug-prefix-map support to remap relative path
9e20f94 ptest-runner_2.0.bb: Update recipe to point to the git.yoctoproject.org repo.
437841c man: fix src/Makefile to work with parallel make
abb5b46 oeqa/selftest/bbtests: Test bbappend order
ddbeb56 bitbake: cookerdata: Improve handling of ParseError
6dff639 gcc: Backport fixes for musl ssp configuration
ab20659 siteinfo: Fix musl 64bit targets
cd16b65 musl: Update to tip
0883aff buildhistory.bbclass: create image directory when needed
c093f7c runqemu: fix for iso
f1f9f89 init-live.sh: fix overlay fs
4e7eaed init-live.sh: fix ROOT_MOUNT
1622077 no-static-libs.inc: build static libusb1-native
b3e4a31 sstatesig: Ensure we keep native depends for allarch recipes
528a890 oe-selftest: generate .env only in test_image_env
21823cb build-appliance-image: Update to master head revision
7d251f7 build-appliance-image: Fix permissions
60656d0 bitbake: fetch2/wget.py: _check_latest_version_by_dir fix prefix detection
45ee2b1 bitbake: fetch2/wget.py: _check_latest_version_by_dir use group names
55cd35b conf/bitbake.conf package.bbclass: fix dbg package not containing sources when -fdebug-prefix-map is used
e2b919c externalsrc: remove nostamp from do_configure
bbfc210 externalsrc: do not use do_configure[nostamp] for git srctrees
9ee403b archiver.bbclass: Just archive gcc-source for all gcc recipes
37683ef oeqa/utils/ftools: improve remove_from_file algorithm
3a934a8 scripts:/oe-selftest: Use timestamp instead of test names in coverage data file
71304d8 xcursor-transparent-theme: upgrade to latest git revision
7c5343a gdb: Fix build on mips64/musl
856be1f libunwind: Fix build on mips/mips64 for musl targets
dd61341 toolchain-shar-extract.sh: check the length for target_sdk_dir
c3c793b relocate_sdk: fixed .gccrelocprefix section handling
cc97d57 glib-2.0: Fix packaging
cef8bc9 gio-module-cache: Add class for Gio modules
0cda9d8 glib-2.0: Install gio-querymodules in main package
9ac1b6f oe-git-proxy: support username / password in http proxy
a15541d oe-git-proxy: also check all_proxy and http_proxy env variables
92b2bc5 wic: Update after task ordering changes
d6cb46c image.bbclass: run wicenv task only for wic images
5cb7705 wic: fix type of no-table option
1209eb2 matchbox-desktop: Do not close desktop on alt-F4
0361676 rootfs-postcommands: don't write manifest when IMAGE_MANIFEST empty
abd5b24 bitbake.conf: rename 'gobject-introspection-data' machine feature to 'qemu-usermode'
f81065f selftest/devtool: Update after make PROVIDER changes
25a04ee make, remake: make them properly exclude each other
f3a92ff kernel.bbclass: consider .csp firmware files
0569b69 tzdata: update to 2016c
a7e726a tzcode: update to 2016c
201d9d3 icecc.bbclass: replace icc with icecc
da00f6c icecc.bbclass: expand package arch
3f1702c icecc.bbclass: add icc_is_allarch inherit check
39170fe classes/sanity: use proper multi-line string literals
33a6135 oe-buildenv-internal: simplify derivation of BB_ENV_EXTRAWHITE
c6ab828 u-boot.inc: Add sub-dir support for SPL_BINARY
ddedab4 quilt: run ptest as normal user
afa4d5e site: Cache config vars for ccache
04344eb gdb-cross: use PACKAGECONFIG for python and readline
5005cab add !meta-poky to .gitignore file
1dd9348 scripts/lib/bsp/help.py: Add missing options to yocto-bsp help and usage
54eca75 poky-sanity.bbclass: update conf/templateconf.cfg for existing installations
2b992f3 site.conf.sample: fix reference to oe-git-proxy script
af63b49 conf-notes: remove reference to adt-installer
1d219ce linux-yocto: Update SRCREV for genericx86* for 4.4
8d4f43e linux-yocto: Update SRCREV for genericx86* for 4.1
84d5924 bitbake: fetch2: Handle lockfiles for file:// urls redirected to mirrors
b036afb bitbake: toaster: get all dependents for pkg for removal
9bf98a9 bitbake: toaster: new customise package-remove modal dlg
d5a419d bitbake: toaster: show full list of dependents to remove
fda94f4 bitbake: bitbake: fetch2/gitsm: Fix fetch when the repository contains nested submodules
1341c17 pseudo: backport a patch to fix xattr removal
07f0af3 uninative: don't try to relocate static binaries
c3c0d0a lib/oe/qa: add method to check if static or dynamic linked
10b6037 uninative: ensure patchelf errors are visible
86d7e44 libmad: remove use of obsolete _thumb over-ride
e7395c8 perf: package python modules into perf-python
b47225f perf: fix python scripts QA errors
ea8b914 linux-yocto/4.1: MFD backports
b6563a1 linux-yocto/4.1: device property : Backport device property patches
46baceb linux-yocto: ktypes/standard: Add tmpfs-posix-acl feature
bdf6b20 linux-firmware: Break out some additional firmware
6d8141f linux-firmware: Clean-up and sync license data
cea2a21 linux-firmware: Collapse iwlwifi firmware blobs for 7260 and 7265
3b3fe1d linux-firmware: Update to latest HEAD
d7cf2c3 archiver.bbclass: Fix tar name for git repositories
2cb4cb7 archiver.bbclass: Fix gcc-source corner case
c29eea0 archiver.bbclass: Fix use of ARCHIVER_WORKDIR and ARCHIVER_OUTDIR
8b7ee6e archiver.bbclass: Don't expand python functions in dumpdata
bc100b3 bind: /var/cache/bind
04d883c sysvinit: downgrade ALTERNATIVE_PRIORITY[mountpoint]
688d9a6 util-linux: split out util-linux-mountpoint
85ff75d gconf: fix buildpaths QA issue
7f7c9ab python-pygobject: use Python 2 instead of Python 3
e33124f sanity.bbclass: check host tool dependencies on change in NATIVELSBSTRING
4fe64d7 libunwind: Fix build with fstack-protector on musl
4aa08b8 ltp: Fix build on x86/musl
959b7f2 package.bbclass: Treat .node files same as .so when checking what to strip
e0bc781 bootimg.bbclass: only inherit syslinux when pcbios
1b1de89 grub-efi.bbclass: make it possible to build vm and live together
4ebaeb2 bootimg.bbclass: fix settings for grub-efi.bbclass
af1f77a pixz: Fix build on big-endian/musl systems
421289c sanity.bbclass cleanup
93e411e matchbox-wm: Update to fix XChangeProperty datatype issue
c843022 matchbox-panel-2: Fix Home-button icon load issue
01f6818 gstreamer1.0: fix introspection support also for git recipes
171adb1 gstreamer1.0-plugins-bad: fix incorrect handling of Cflags in gstreamer-gl.pc file
6462d08 x86-base.inc: suggest the latest kernel
c5c9ed6 at: fix configure option with/without-selinux
9b2b1f0 no-static-libs: just like target and native, nativesdk-libcap doesn't like unrecognised options
bf90d0c linux-firmware: package firmware for Marvell 88W8688
cd17ab0 tune-arm926ejs: Handle missing thumb suffix
5b70c7e nativesdk-coreutils: a lot of warnings fixed
b47c53b runqemu-internal: split the code into functions
fae732f runqemu-internal: clean up unused code
e469bb7 runqemu: simplify checking for iso and ramfs
3610329 runqemu: add support for qcow2 and vdi
d85ca4a runqemu: remove ISO and RAMFS from help text
58bc854 runqemu: simplify the checking for vm images
6716eb2 runqemu: fix ROOTFS for vmdk
258cfa8 python(3): Disable tkinter
5988b5c selftest/signing.py: RPM_GPG_PASSPHRASE_FILE -> RPM_GPG_PASSPHRASE
3e5c5fe gpg_sign.py: get rid of pexpect
05d7e0d rpm: check _gpg_passphrase before asking for input
13a31b1 oe-publish-sdk: fix remote publishing
9926425 oe-publish-sdk: improve help output slightly
905286c oe-publish-sdk: drop SDK installer file from published output
0523378 devtool: add: create git repository if URL specified as positional argument
11c1d30 devtool: add: delete externalsrc files on npm recipe do_install
552a68a devtool: configure-help: fix error if do_configure not already run
eab3f06 bitbake.conf: whitelist proxy variables in config hash
58d2e56 classes/populate_sdk_ext: parse metadata on minimal SDK install
0684572 devtool: sdk-install: add option to allow building from source
50addfb classes/distutils*: don't hide logs when setup script fails
0ec30c7 classes/packagegroup: drop complementary -ptest if ptest not in DISTRO_FEATURES
d96ea29 classes/packagegroup: fix dbg/dev/ptest complementary packages
b58e5b1 bitbake: bitbake: xmlrpc: set single use mode differently
2df514b sdk-manual: Added note for running remote apps with SSH port forwarding enabled.
12f5c25 poky.ent: Added code name for 2.1 release to the variable
64241e0 sdk-manual: Applied more review edits to the manual per Eggleton.
b44d9e5 ref-manual: Created distrodata and checkpkg tasks, updated distrodata class
54050ff sdk-manual: Applied 2nd round of review edits.
6db8cbc sdk-manual: Applied review edits to the manual.
922eaeb sdk-manual: Updated the SDK devtool modify flow diagram.
2bbf77a dev-manual: Fixed a grammar error
286b76f sdk-manual, mega-manual: Updated the SDK devtool modify diagram
c3946bc dev-manual, profile-manual, ref-manual: Updates to remove meta-toolchain
7233e35 sdk-manual: Edits to add extensible SDK configuration sections.
b31bf7c ref-manual, sdk-manual: Changed section heading.
670735e ref-manual: Added some SDK manual support to introduction
266742b profile-manual: Updated screen output for oe-init-build-env
0654224 kernel-dev: Changed a link from an example to in-text.
19e3648 dev-manual: Edits from a 2.1 read-through.
a389684 poky.ent: Fixed a typo in one of the variables "ftar" to "tar"
b5d3065 poky.ent, bsp-guide: Removed eMenlow example and updated 2.1 variables
884b528 yocto-project-qs: Performed a read-through edit.
4b42385 poky.ent: Updated copyright year and version variables.
ae48b1f mega-manual: Added two new sections for the sdk manual
815d686 sdk-manual: Added some intro stuff about the SDK
4c5157f ref-manual: Resolving a conflict
4306f7f sdk-manual, mega-manual: Added new figure for Eclipse flow.
0bb6e48 sdk-manual: WIP on the book.
5a64701 sdk-manual, mega-manual, Makefile: Added new figures
32629e0 Makefile: Resolving a conflict
af40e9a sdk-manual: Added a new figure for installed extensible sdk directory.
62477889 sdk-manual: Applied some "red" text formatting to indicate notes
7ab8afa Makefile: Added the ".png" part to a figure I forgot.
fc43555 sdk-manual: Added a red-text "role" to the style sheet.
d07100d sdk-manual: Added new section detailing installed SDK directory.
b750729 sdk-manual-customization: Fixed XSL Appendix numbering parameter
ad7a994 Makefile: Updated the figure list for the mega-manual.
890f721 sdk-manual: WIP - Various small edits as WIP
f15f96c sdk-manual: New content for outline purposes.
4643b04 sdk-manual: Updated with two new appendices for new files.
d05566b sdk-manual: Added sdk-environment.png diagram.
0936eed sdk-manual: Added two appendix files to SDK Manual.
6996a1c Makefile: Added sdk-environment.png to figure list for SDK Manual
6cdb356 toaster-manual: Edits to a previous patch.
77594c0 mega-manual, Makefile: Added support for three new toaster figures.
00fe95d toaster-manual: Explain the local release
d06c7b8 documentation: remove all references to Hob
be8af37 ref-manual: Updated COREBASE_FILES variable.
5c7e5aa bitbake: bitbake-user-manual: include/require checks current directory
7ec8f28 bitbake: bitbake-user-manual: Updated the "inherit Directive" section.
75cba54 bitbake: bitbake-user-manual: Updated the copyright year to 2016
2918b50 bitbake: toasterui: remove ParseStarted from the event list
ab2abd4 bitbake: toasterui: Remove the excessive exception logging
d8137be bitbake: cache: Make BB_DONT_CACHE variable external
1d1aaa2 bitbake: toaster: orm generate CustomImageRecipe contents try secondary path
5c49230 bitbake: toaster: localhostbecontroller put generated layer in the builddir
b60c994 bitbake: toaster: localhostbecontroller Allow file:/// uri type for git repo
3025092 bitbake: toaster: orm Add a constant for the CustomImageRecipe's layer name
3df6551 bitbake: toaster: localhostbecontroller Don't clear out toaster custom layer dir
2f2f784 parselogs: add new whitelist entries to address 4.4.3 issues
8037ba4 bitbake: bb/tests/fetch: Update cups url
dab6d59 oe-buildenv-internal: Correct the sed expression which updates $PATH
068afc5 tzdata: update to 2016b
e140272 tzcode: update to 2016b
c0b3667 ffmpeg: Remove RSUGGEST=mplayer
e528a0a lttng-tools: Remove lttng-ust from PACKAGECONFIG for musl
42b9bdf packagegroup: Disable packages not available on musl
f148a2e world-broken: Add packages broken on musl
624ca6a siteinfo: Move apr configure cache to common-linux
90234f1 parselogs: add new whitelist entries to address 4.4.3 issues
13a2a3f u-boot: Upgrade to 2016.03 release
ecf3396 grub: add -Wno-error=trampolines to native CFLAGS
07515b0 dhcpd: create dhcpd user for dhcp daemon
b9ad80d valgrind: fix buildpath QA issue
7985006 gcc-5.3/gcc-4.9: Reuse -fdebug-prefix-map to replace -ffile-prefix-map
2faa718 gcc-5.3/gcc-4.9: replace build path with target path in __FILE__
76f10fd oe-buildenv-internal: Some clean up
4d1efc3 oe-buildenv-internal: Add variables individually to BB_ENV_EXTRAWHITE
39ac332 oe-buildenv-internal: Add paths to $PATH individually
dd5f2f7 oe-init-build-env*: Make them actually return failures
ea28de6 oe-init-build-env*: Remove unnecessary differences between the scripts
51aa00f oe-init-build-env*: Update/correct comment about specifying arguments
16fb9b8 oe-init-build-env*: Allow $OEROOT to be predefined
3173979 bluez5: allow D-Bus to spawn obexd in systems without systemd
10ef68f oeqa: remove RPM 4 self test
d915965 lib/package_manager: remove RPM4 support code
03fce73 smartpm: remove rpm4 patch
1e9de52 rpm: remove RPM 4
a7dd04d grub: fix documentation rebuilds
ee4f61b oe-selftest: Fixed --list-tests-by tag option
068e898 gcc-runtime.inc: set LICENSE for all gcc-runtime packages
788dfdd ParaTypeFFL-1.3: Add license file
62ddde6 externalsrc: use shared stamp directory if B=S
1969332 rpm: fix error when 'lua' is enabled
a31301e matchbox-keyboard: Update to latest HEAD to fix 64bit issue
40a55f1 oeqa/selftest/buildoptions: test read-only-rootfs
f64fdd2 oeqa/selftest/sstatetests: verify more variables don't impact the hash
ac347da gobject-introspection.bbclass: wrap comments at 80 columns
ae63b88 qemuarm64.conf: don't clear MACHINE_FEATURES
cad415d sanity.bbclass: allow customizing config file update error messages
96a5cb4 sanity.bbclass: fix success message when config file was updated
805aca8 sanity.bbclass: expand error messages for version checks
7d6801c lighttpd: fix /usr/lib/mod_cgi.so: undefined symbol: chunkqueue_written
5f7b9f0 valgrind: Disable nios2 support
aaaccc4 systemtap: Disable nios2 support
5857b20 lttng-modules: Add nios2 support
26248cd kexec: Disable on nios2
3e4d99b packagegroup-core-sdk: Disable sanitizers for nios2
797ffc8 bdwgc: Backport nios2 support
238e2c1 libatomic-ops: Backport nios2 support
7e83af3 selftest/buildoptions: Renamed one test case
0d9f515 python-numpy: Fix build on musl
e1f3f4c socat: Access c_ispeed and c_ospeed via APIs
bb4e6e0 watchdog: Disable nfs on musl targets
f00cca8 bdwgc: Check for getcontext() API during configure
51464e7 devtool: change config symlink name to .config.new
8c0148f systemd: Fix and expand ptests
427e369 oeqa/utils/testexport.py: add functionality for exporting binaries
2191623 init-live: make it easier to add custom boot targets
57a525c useradd_base.bbclass: replace retry logic with flock
5d06f00 image.bbclass: track ROOTFS_POSTUNINSTALL_COMMAND in do_rootfs vardeps
6129d86 eudev: split eudev-hwdb from eudev
9aa27fe openssl: don't move libcrypto to base_libdir
370419e xcb-util-image: Fix build with clang
8727975 musl: Update to get mips64 port
4653fdd dhcp: enable gentle shutdown
e382d96 coreutils: fix reporting 'unknown' by `uname -p' and `uname -i'
3b8cd1d ncurses_6: Improve installation
9cc65ed Revert "selftest: Added MACHINE = "qemux86" to tests that use runqemu"
3c5ee61 busybox: Drop -r passthrough patch
2c666af linux-yocto/4.1: usb: add usb_otg_caps to usb_gadget structure.
8dc9162 linux-yocto/4.1: Intel Broxton and Sunrisepoint-H: pinctrl and drm
99ad4c9 linux-yocto/4.1: powercap/RAPL: Backport powercap/RAPL
c4f544e linux-yocto/4.1: Thermal: Enable Broxton SoC thermal reporting device
123c2c6 linux-yocto/4.1: usb backports for Apollo Lake/Broxton
600b700 recipetool: create: don't create extra files directory unconditionally
8debfea local.conf.sample: Disable prelink by default
efa0881 oeqa/selftest/recipetool: Fix test_recipetool_create_simple
c9d269c Revert "packagegroup-core-x11-sato: add python-pygobject and gtk+3"
d24a39a oeqa/recipetool: Fix syntax error
55a1e52 oeqa/recipetool: Improve debugging output by adding dirlist
637b3c8 uninative: Add a fix for icu-native to use the correct ABI
9dbfbe9 scripts/oe-selftest: Add short names to most common options
681a452 gcc: Fix the license on GNU OpenMP
15c5b2a Revert "gcc: Fix the license on GNU OpenMP"
d5cdb48 perl: fix missing dependency for perl-misc
0eb52b9 classes/buildhistory: record a few more variables for extensible SDK
cbb4c5b package-deb: Ignore circular dependencies
fcc7ff0 package_deb: Fix python runtime error
9155b24 python-numpy: fix buildpaths QA issue
9e69963 python: move ast module into python-core
1a35166 xserver: require sufficiently new libdrm
36bf666 package_manager.py: Fix race condition in OpkgIndexer.write_index()
35be679 scripts/oe-selftest: Add search expression matching to run/list options
4489ef1 glib-2.0: relocate the GIO module directory for native builds
cf3402e image-buildinfo.bbclass: fix performance problems
e2fe28c linux-yocto/4.4: gpio-pca953x: add "drive" property
3d45853 python3: fix do_configure check platform triplet error
03b167d ncurses_6: Fix an install race condition
09eab6b build-appliance: make the inclusion of downloaded sources optional
8ea5cdc builder: remove hob from autostart
ff5d9f7 Revert "gstreamer1.0-plugins-XXX: move inherit gettext into common .inc file"
c99da8d musl: disable building of gobject introspection data
0dea50e machine/include/arch-x86: Make x32 ABI not supporting gobject-introspection-data
8c14c74 bitbake.conf: add 'gobject-introspection-data' to DISTRO/MACHINE_FEATURES_BACKFILL
2e27994 packagegroup-core-x11-sato: add python-pygobject and gtk+3
8b1fa2a webkitgtk: enable gobject introspection
7bd32b9 recipes-gnome: fix introspection support
efd37c5 python-pygobject: update to 3.18.2
ff3500b gnomebase.bbclass: do not disable gobject introspection
ac5cc0c gstreamer: enable gobject introspection
03cd714 libsoup-2.4: enable gobject introspection
c1d67e4 clutter: enable gobject introspection
0ec412b gtk+3: enable gobject-introspection
d6f8028 gtk+: enable gobject introspection
0d1e4b2 avahi: enable gobject-introspection
d2e0dc1 python-pygtk: remove the recipe
0c6d7cb avahi-ui: remove the dependency on python-pygtk by disabling avahi-discover
4fbf761 vala.bbclass: remove pre-packaged vapigen.m4 from tarballs
235455d vala: enable the use of vapigen by packages with vala support
d1b96f1 gobject-introspection.bbclass: add a class that enables gobject introspection
96b5847 gtk-doc-stub: remove introspection stubs
3a1d9fb gobject-introspection: Override GIO_MODULE_DIR when scanning
10e9977 gobject-introspection: add the recipe
3c66619 bitbake: fetch2/npm: fix ud.registry so that alternative registries can be handled
0155472 ref-manual: Updated "Application Development SDK" section.
4438460 ref-manual: Applied review edits to several SDK variables.
3c727ff ref-manual: Updated "Cross-Development Toolchain Development" section.
af1517c ref-manual: Updated "Build History SDK Information" section.
d9fc04b dev-manual, mega-manual: Updated "Application Development SDK" section.
357aa33 ref-manual, mega-manual: Updated "SDK Generation" section.
54490c0 ref-manual: Added several extensible SDK variables to glossary.
6dfd441 ref-manual: Updated IMAGE_PKGTYPE variable.
77f002c ref-manual: Updated "Cross-Development Toolchain Generation"
ee90cc6 ref-manual: Updated the "Build History SDK Information" section.
53dd8a0 dev-manual: Moved "Optionally Using an External Toolchain" to Tasks chapter.
9d76cfe meta: toolchain-shar-relocate.sh: Fix for extracting SDK in the same directory as SDK script.
054abad nettle: The variable named p in the patch file was incorrectly named.
93a5417 valgrind: Make dep on glibc-utils conditional on TCLIBC = glibc
40c9774 make 4.1: fix segfault when ttyname fails
7f27713 gcc: Disable libitm for MicroBlaze
81d58d6 sign_package_feed: add feed signature type
42f612c package_manager: sign IPK package feeds
c637783 signing-keys: create ipk package
14e809e gpg_sign: export_pubkey: add signature type support
0b088e0 gpg_sign: detach_sign: fix gpg > 2.1 STDIN file descriptor
2fccd8a gpg_sign: add local ipk package signing functionality
6bd6a2b systemd: add comment stating that resolved needs gcrypt
a5fd57d selftest/bblayers.py: Remove hardcoded recipe files
dce7290 selftest/prservice.py: Sanitize package version when looking for stamp
cbd87f3 lsof: update UPSTREAM_CHECK_URI
57fb05a eudev: provide UPSTREAM_CHECK_URI
3f8d5bf toaster.bbclass: show packages that were setscened into existence too
39e1351 gcc: Fix the license on GNU OpenMP
c6aeef3 linux-yocto/4.4: Galileo updates
37b61b0 siteinfo: Add ppc64le support.
0265fcc nettle: disable static for 2.7.1
8660cd1 nettle: Security fix CVE-2015-8804
dae5715 nettle: Security fix CVE-2015-8803 and CVE-2015-8805
24aea3a glib-2.0: silence warnings when parsing headers for introspection
3331992 qemu: Limit paths searched during user mode emulation
b578a06 image-mklibs: handle position independent binaries
c706b5e libpam: define limits.conf as CONFFILES of package libpam-runtime
82dec46 perl-rdepends: Remove circular dependencies
815c36f rpm: Sync CVS to regular version
775f22e rpm: Fix musl integration with RPM5
001bdef gcc: Disable libitm for nios2
d53413d bitbake: server/process: Try connecting 4 times before giving up
0f01059 bitbake: toaster: models List only have the specified project's imported layers
0dcab02 bitbake: toaster: rework task buildstats storage and display
cc74a8a bitbake: toaster: use force_bytes to display non-ascii project names
aebc22d bitbake: fetch2: Make SRC_URI[md5sum] and SRC_URI[sha256sum] expand their values
d405f97 bitbake: xmlrpc: fix bug in setting XMLRPCServer.single_use
c50bdb3 bitbake: fetch2/npm: add missing URL argument to ParameterError
fbf27c4 bitbake: fetch2/npm: properly handle npm dependencies
ef6a451 bitbake: fetch2/npm: fix errors with some version specifications
ad50ce9 populate_sdk_ext: Correct commit 8b81bb56c69aabdea984352f8e267a9783c0bdbc
bc0e99d recipetool: create: shrinkwrap and lockdown npm modules
309b2e6 recipetool: create: support creation of additional files by plugins
2279eb2 recipetool: create: check if npm available if npm:// URL specified
9145500 recipetool: create: split npm module dependencies into packages
d46827c recipetool: create: add license file crunching
3fd244b recipetool: create: match *LICENSE* as a license file
2b6a352 recipetool: create: improve mapping for autotools program macros
1607fac recipetool: create: be more tolerant of spacing in configure.ac
9dca5c8 lib/sstatesig: skip shared_workdir when checking locked sigs
142bad3 python3: fix patching get_python_lib() in distutils/sysconfig.py
50d07e9 python3-native: use the previous version of python-config script
5dce2e3 qemu.bbclass: add qemu_wrapper_cmdline()
8b5afcd db: remove the NO_UPDATE_REASON and replace it with a comment about RPM
5699c67 rpmresolve: It is not necessary to manually specify -lpopt
8ea55ba rpm: A number of the patches have been submitted upstream
6833c5d rpm: Enable specific crypto and digest settings via variables
59a4d99 security_flags.inc: Special flags are needed for RPM
007c284 rpm: Uprev to rpm-5.4.16 (pre) and rpm-5.4+cvs to current CVS head
a27ca6d yocto-bsp: Update templates to 4.4 kernel
2d0933c conf/distro/include: drop old recipes
1fd183e bblayers.conf.sample: remove BBLAYERS_NON_REMOVABLE
477b8fb poky: Enable uninative
1b7cc9c linux-yocto/4.4: explicitly enable ftrace in tracing fragment
aee7482 linux-yocto/4.4: iwlwifi: mvm: don't allow sched scans without matches to be started
2408f49 linux-yocto/kernel-meta: ktype refactoring: move DEBUG_KERNEL, EXPERT and EMBEDDED
9ac029b xmlto: tell xmlto where cp is
6d89b52 toaster.bbclass: improve how we gather buildstats for Toaster
4dd3e40 image-prelink: use STAGING_*_NATIVE variables
2193e9d strace: Backport fixes for compiling with clang
ee8ff42 ghostscript: 9.16 -> 9.18
3f5725c fontconfig: Revert changes made to FcConfigAppFontAddDir() recently
433d866 populate_sdk_ext: Make populate_sdk_ext nostamp
e186d6d systemd: binfmt should be added to SYSTEMD_PACKAGES only if binfmt is enabled
b051a95 license.bbclass: fix host contamination warnings for license files
f8a9774 oeqa/selftest/buildoptions: Test build does not fail without git rev
656aeff busybox.inc: add tail symlink so busybox can commit suicide cleanly
a321f4e avahi-ui: add dbus to PACKAGECONFIG
1bd4b72 avahi: add missing intltool-native build dependency
72f9e39 avahi: make dbus optional but default
424466b oe-setup-builddir: tidy up local.conf and bblayers.conf commentary
07919e9 net-tools: Add SCTP option support
e8254bc tune-corei7.inc: Fix PACKAGE_EXTRA_ARCHS for corei7-32
5346675 eudev: remove redundant udev_run assignment
adad264 xcursor-transparent-theme: use a version glob in the selftest bbappend
946d00c populate_sdk_ext: Update after uninative changes
ba57ba1 image.bbclass: support chaining compression (aka conversion) commands
5ac3dc7 image.bbclass: fix incomplete .rootfs customization
3322fa7 bitbake: toasterui: fix warning 'Unknown event'
621cbc8 bitbake: toasterui: exit on final events
8e138b7 bitbake: toasterui: make toasterui to work in build mode
0a61306 bitbake: toasterui: check if setEventMask succeeded
ac941ac bitbake: command: make setEventMask readonly
dd3da9a bitbake: toasterui: update list of events
f56fa5d bitbake: toasterui: reformat list of events
a71d32a bitbake: toaster: remove sshbecontroller module
3db71b4 bitbake: toaster: don't use sshbecontroller
790b2d1 bitbake: toaster: raise NotImplementedError
96535ba bitbake: toaster: bring back the strict directive
5b8b399 bitbake: toaster: change 'revision' to 'Git revision'
07ead98 bitbake: toaster: views api Package info return both kinds of RDEPENDS
9cda2ab bitbake: toaster: fixup dependency excludes for customimage
a54cebe bitbake: fetch2/npm: ignore unknown headers in tarballs
0cd1be1 bitbake: fetch2/npm: handle alternative dependency syntax
d999927 bitbake: fetch2/npm: fix indentation
26ee4dd image creation: allow overriding .rootfs suffix
e43fcdf scripts/hob: drop
59b4cef classes/packageinfo: remove
bbf2a5d conf/documentation.conf: remove BBLAYERS_NON_REMOVABLE
7054882 yocto-uninative: Add common include for uninative
d2c96ca mtools: Drop GCONV_PATH manipulation
d27644e uninative: Handle relocate of GCONV_PATH in libc
0523499 uninative: Add checksum support
73265d1 uninative: Refactor common code
4feb00d uninative: Use CXX11 ABI for interoperation between gcc4 and gcc5
013dd24 uninative: correctly enable uninative
034618d glibc: Add relocation of GCONV_PATH
8dca343 uninative-tarball: Add glibc-gconv-iso8859-1 for guile
1f50f29 dpkg: Use tar everywhere (not gtar)
b158d6c gtk+3: Add missing DEPENDS on wayland-native
e395e81 tune-cortexa17.inc: apply changes similar to a15
ea53d1e sstate: Allow late expansion of NATIVELSBSTRING
bd3a1d5 linux-yocto: Update SRCREV for genericx86* for 4.4
70c6df2 linux-yocto: Update SRCREV for genericx86* for 4.1
ae85c4b linuxloader/image-prelink/image-mklibs: Fix non-standard path prelinking
0b84897 insane/prelink: Handle nonstandard library paths
6b564ae ext-sdk-prepare: Catch setscene tasks which should have run but didn't
d8efd2e createrepo: Fix stat floating timestamps
ce5a9df xmlto: ensure /bin/bash is used as bash
70b4f36 openssl: add a patch to fix parallel builds
1632742 xdg-utils: remove trailing whitespace in multiline string
816391a btrfs-tools: Add libgcc to RDEPENDS
e467156 bitbake.conf: Add libgcc-native to ASSUME_PROVIDED
a91713f net-tools: Override CFLAGS/LDFLAGS in do_install too
fb0c3c5 nspr: Fix build regression on musl from last upgrade
37f5fb9 gdb: fix builds with internal readline and no static libraries
6518db4 feature-arm-thumb.inc: Fix thumb tune override warning
afb1d09 recipetool: create: fix support for AX_CHECK_LIBRARY
463fd5e formfactor: assume a keyboard is plugged in
e2107f5 acl: Fix re pattern in test cases
82a8064 gcc-runtime.inc: disable libitm for little endian MIPS too
25d9c4e devtool: add build-sdk subcommand
41eb36d devtool: build-image: rename module
82d0c8a oeqa/buildoptions: Improve unsafe references tests
4284fdf insane.bbclass: make the checking stricter for unsafe references in scripts
5cd71fe yocto-project-qs: Updated flow to mention Toaster
cd041b7 dev-manual: Applied review comments to the devshell section.
f54fe56 ref-manual: Updates for nativesdk clarifications.
a882267 dev-manual: Fixed typo in the devshell section.
70c7e36 dev-manual: Created devtool upgrade section.
b2b22d5 dev-manual, mega-manual, Makefile: Added support for new upgrade flow
0b7d8a4 dev-manual, mega-manual: Updated the workspace directory structure image
050e021 dev-manual: Applied review changes to the devtool section.
09ecf38 dev-manual, mega-manual: Updated three figures for devtool
f33ffaa dev-manual: Applied more review comments to the section.
fe70eb2 dev-manual, mega-manual: Updated the devtool modify flow diagram.
eb3b414 dev-manual, mega-manual: Updated the devtool add flow diagram.
4c5bd3f dev-manual, mega-manual: Updated the devtool workspace figure.
9cee16b dev-manual: Applied review comments to the devtool section
c678d1a dev-manual: Updated the devtool add section.
a09238a dev-manual, mega-manual: Updated devtool add flow diagram
7699f0a dev-manual: Added section for devtool modify flow
1eecaea dev-manual, mega-manual: Added new figure for devtool modify flow
9582da6 dev-manual: Edits to the devtool-add section.
740369f dev-manual, mega-manual: Updated the devtool add flow figure
a848e9f dev-manual, mega-manual: Updated the workflow layer content figure.
34e08b3 dev-manual: Added new "writernotes" style.
17a21e6 Makefile, dev-manual, mega-manual: Added new figure support
d346c35 dev-manual: Applied review comments to devshell section.
3b41049 ref-manual, dev-manual: Clarifying "native" and "sdknative"
a1970eb dev-manual: Updated devshell section.
a58cde0 toaster-manual: Updated how manage.py createsuperuser command is run
c5b4f69 ref-manual, dev-manual: Clarification of "native" and "sdknative"
952bcc7 toaster-manual: Removed prompts for json file.
34c75fa ref-manual: Updated the S variable description with feedback
2b2ced0 ref-manual: Updated the staging.bbclass description
b9dddd5 ref-manual: Updated the S variable description.
41e9f7c dev-manual, ref-manual: Updated licensing text information.
5066fbc ref-manual: Added order information for conf file parsing.
ad6b2f2 toaster-manual: Removed typo - double "allow" words.
c8c533e ref-manual: Updated the do_populate_sysroot task.
2a3942b dev-manual: Updated section on adding license text.
77b3d06 ref-manual: Updated the S variable entry in the glossary.
a1a4808 toaster-manual: Applied a patch to weed out build mode (modes).
353b755 bitbake: bitbake-user-manual: Added expand() function to list.
638ad17 bitbake: bitbake-user-manual: Added note for Python variable ref expansion.
da22add bitbake: bitbake-user-manual: Enhance environment variable discussion.
f11de9d e2fsprogs: do not enable non-stable features by default
b04280a sdk_update.py: Enable local sdk-update tests
14dd07c sdk.py: Fix undefined variable
c12e919 eudev: recipe formatting improvements
73a43fc openssl: Security fix Drown via 1.0.2g update
ed14aef layer.conf: Update after replacement of udev with eudev
e72233a bootimg: set default value for LABELS variable
4eaef67 sanity: Do not mistake meta-yocto-bsp for meta-yocto
86759de sanity.bbclass: remove conflict checking for image vm and live
bb1c719 syslinux.bbclass: allow vm and live to be built together
5c5c13d recipetool: create: add basic support for new npm fetcher/class
2be37a9 recipetool: create: add basic support for generating linux kernel recipes
5cf15ff recipetool: create: add support for out-of-tree kernel modules
937ecd0 bitbake: toaster: cleanup of bin/toaster startup code
a7d1b95 bitbake: ui: remove the puccho ui
a9dc72f bitbake: hob: removal of hob ui and associated ui files
27468db bitbake: fetch2/npm: Add missing ParameterError import
44e3461 bitbake: npm: in cases where shrinkwrap resolved a git URL, ignore it and grab dist.tarball
2a73181 bitbake: fetch2: Fix unpack for absolute file urls
865d2fe bitbake: fetch2: fixes copying of file://dir; subdir=foo, bug 6128 and bug 6129
fb437d3 meta-yocto-bsp: bump to linux-yocto 4.4 for the non-x86 BSPs
fbedac4 maintainers.inc: Add new eudev package and change maintainership for udev
0138874 gcc: Add support for atomic operations (libitm) where available
70153b4 classes/externalsrc: fix symlinking if symlink exists pointing to another path
eac4061 populate_sdk_ext: Only write LCONF_VERSION to bblayers if it is set
c366343 automake: don't delete .pyc files
d6e63be cracklib: fix Python packaging
a005d25 populate_sdk_base: handle empty SDK_PACKAGING_FUNC
ec3be9f linux-yocto/4.4: update to 4.4.3
6ed16ff linux-yocto/4.1: iwlwifi: mvm: don't allow sched scans without matches to be started
2497e80 linux-yocto/4.4: update to -stable 4.4.2
aa2c1f7 linux-yocto: braswell: Remove feature and move DRM_I915_PRELIMINARY_HW_SUPPORT option
702701d linux-yocto/4.4: yaffs2 build fixes
c2152b8 linux-yocto/4.1: update to 4.1.18
45d4cd7 linux-yocto/4.1: clkdev updates
79ecef6 linux-yocto/4.1: Galileo updates
5f61693 usbutils: Fix for new eudev implementation
c89b777 libgudev: Fix for new eudev implementation
3e5e540 eudev: Replaces udev with eudev for compatibility when using sysvinit on newer kernels
674e55f populate_sdk_ext: Delete the buildtools tar file after installation
d8acef2 libarchive: Set xattrs after setting times
431c1e1 combo-layer: handle empty commits during "init --history"
695cc45 classes/populate_sdk_ext: prepend to PATH rather than appending
b145480 classes/module: allow substitution of the modules_install target name
b03936c grub2.inc: drop bogus dependency on xz
7328765 grub2.inc: avoid passing -isystem to native builds
576587d grub2.inc: don't export TARGET_CFLAGS etc to grub2 configure
97a3322 harfbuzz: update 1.2.1 -> 1.2.3
edf93a0 gstreamer1.0-plugins-bad.inc: limit ARM_INSTRUCTION_SET over-rides to armv4/armv5
89140b0 dhcp: CVE-2015-8605
6ccd8cd sato/images: Add ptest image
f38debb layer.conf: Whitelist cantarell-fonts fontconfig dependency
b307937 pango: make ${PN}-ptest RDEPENDS on cantarell-fonts
0c80f29 cantarell-fonts: Add recipe
4006a7f sanity: Fix int versus string reference
2e27c4b bitbake: fetch2/npm: Enable fetcher
1c060d7 pseudo: Increase number of retries
030d920 bitbake: providers: Fix PREFERRED_VERSION lookup for '_' in PN
c679a3d bitbake: fetch2: Skip lockfiles and donestamps for local files
d01042e bitbake: fetch2/__init__.py: Error if lockfile path invalid
ab7b7bf bitbake: fetch2/__init__: Fix decodeurl to better handle urls without paths
06b4d8f bitbake: fetch2/wget: Set localfile for directories
8d7e799 genericx86-common: Update PREFERRED_VERSION_linux-yocto to 4.4
65d6a62 gstreamer1.0-plugins-bad.inc: enable webp PACKAGECONFIG by default
cd00748 gettext: Delete libintl.la file from install
b33efa9 systemctl: handle RequiredBy dependencies
8caa592 ffmpeg: add bzlib, lzma and xv PACKAGECONFIGs
0011760 rootfs-postcommands: fix ssh_allow_empty_password checking
96f5f89 musl: Add linux-libc-headers to deps
3354878 mesa: Fix build on musl
7651342 dosfstools_2.11: fix build following removal of -e from EXTRA_OEMAKE
6c8abea uclibc support for rng-tools
c7e5a38 oeqa/sdkext: Add sdk_update.SDKUpdateTest class.
738bd1a classes/testsdk: Pass tcname to SDK and SDKExt contexts
2a410b2 classes/testsdk: Move the removal of bitbake PATH to eSDK context only
eb1f8b9 classes/testsdk: Move code for avoid PATHs to oeqa.utils
55d4849 gstreamer1.0-plugins-XXX: control orc PACKAGECONFIG via GSTREAMER_ORC
083c63d boost.inc: fix BJAM_OPTS --build-dir option
f4e17c6 shared-mime-info: update to 1.6
4ffdfdf vala: update to 0.30.1
f53f374 python-git: update to 1.0.2
ec73437 pax-utils: update to 1.1.5
447ddb9 nettle: update to 3.2
26a3d25 ncurses: update to revision 20160213
dc42d30 libdrm: update to 2.4.67
0296e0a gtk+3: update to 3.18.8
e08ad62 gtk-icon-utils-native: update to 3.18.8
9daf153 git: update to 2.7.2
927dfaf gnupg: update to 2.1.11
2c39358 clutter-gst-3.0: update to 3.0.16
b8a1e59 ccache: update to 3.2.4
4d4aa1f libsolv: update to 0.6.19
8c2e420 ffmpeg: update to 3.0
afce247 nspr: update to 4.12
b19dbe5 pcmanfm: update to 1.2.4
6b41608 libfm: update to 1.2.4
325a9d3 epiphany: update to 3.18.4
d4da534 wic: don't throw away our created swap partition
5f82d17 automake: set test-driver path relative to top_builddir
b41862d uninative-tarball: respect SDKMACHINE when building
4d1c14f boost.inc: enable more verbose build logs
7f84ad0 gstreamer1.0-plugins-XXX: move inherit gettext into common .inc file
2ce48e6 gstreamer1.0.inc: add explicit PACKAGECONFIG init
935d88a gstreamer1.0-libav: move LIBAV_EXTRA_CONFIGURE_COMMON_ARG into .inc
3a8ff19 gstreamer1.0-libav_git: add --ranlib option to LIBAV_EXTRA_CONFIGURE_COMMON_ARG
b8bdb99 boost.inc: limit ARM_INSTRUCTION_SET over-rides to armv4/armv5
9ca8f30 populate_sdk_ext: Add images to SDK_INSTALL_TARGETS
07dc765 boot-directdisk.bbclass: drop IS_VM checking
a87574c image-live/boot-directdisk.bbclass: remove AUTO_SYSLINUXCFG
76eb815 testimage.bbclass: reuse generic test suites
6571a84 testimage.bbclass: add generic, image test suites
8c45747 gconf: remove redundant dependencies
a74c389 gtk-doc-stub: don't inherit autotools
2269f90 os-release: sanitise VERSION_ID field
9d86b26 apr-util: add ldap crypto and sqlite3 to PACKAGECONFIG
d8d2f57 apr-util: fix loadable module packaging
77cfa2b glibc.inc: improve optimisation level sanity checking
04c4719 rsync: add native variant
2c20fe4 core-tools-profile: add lttng tools for aarch64
8a0b997 lttng-ust: add support for aarch64_be
6081c35 liburcu: add support for aarch64_be
07a3c71 harfbuzz: add explicit dependency on fontconfig
73cc8b8 harfbuzz: update 1.2.0 -> 1.2.1
bb151b8 fontconfig: Don't add font directories from host
e9f5134 musl: Upgrade to 1.1.14
bf4d380 oe-selftest: devtool: add an additional test for devtool upgrade
4bae2f2 oe-selftest: devtool: rework devtool upgrade test
10290f2 devtool: upgrade: print new recipe name
5cd3be3 devtool: upgrade: drop PR on upgrade
e6f684b devtool: upgrade: eliminate unnecessary datastore copy
860574e devtool: upgrade: fix several issues with extraction of new source
66a781c devtool: upgrade: fix constructing new branch from tarball releases
d30cc76 devtool: upgrade: fix renaming of recipe if PV is not in name
75eeeab devtool: upgrade: fix moving version-specific files directory
81ebb0b devtool: upgrade: fix version argument checking
e953b57 devtool: upgrade: drop superfluous call to validate_pn
492b1eb devtool: upgrade: make source tree path optional
942ae25 devtool: modify: fix source tree default name when mapping virtuals
e2334e1 devtool: add: tweak auto-determining name failure message
55ae566 uninative.bbclass: if the loader can't be found disable instead of failing
50b8740 uninative: use check_output instead of Popen directly
4495e8b lib/oe/qa: add explicit exception for 'file isn't an ELF'
4553bb1 libdrm: fix build with uclibc
4e5a871 strace: fix ptest execution
e8e0489 clutter-1.0: Fix configure test errors found by clang
b748f40 oeqa/parselogs: Updated whitelist
4b32351 buildstats.bbclass: Don't assume /proc/<pid>/io present
07e1f10 sysvinit-inittab: Move start_getty script to base_bindir.
8d07e14 oeqa/selftest/prservice: Added new TC: check pr-server starts and stop correctly on localhost.
d2a563c oe-selftest: Add support for lib/oeqa/selftest subdirectories
7f58b92 musl: Upgrade to 1.1.14
73bf792 devtool: update-recipe: create config fragment
2fbd1d7 devtool: sync: update kernel config
26f951b git: fix installed-vs-shipped QA Issue
033db24 btrfs-tools: fix symlink creation multiple times
9af773f bison/gettext: add --with-bisonlocaledir to assign BISON_LOCALEDIR
b14e2ae gcc: use relative path for configure script
1f00fb2 depmodwrapper-cross: nopackages to avoid QA [buildpaths] issue
00a6f5a oeqa/utils: added new network module
3f7aa6f scripts/oe-selftest: Use site.USER_SITE to run coverage configuration code for sub-process
1c6c76e scripts/oe-selftest: Add filtering to the coverage data gathered by oe-selftest
4a21827 oeqa/selftest/signing: Added test for locked signatures
604dc1c package: check inherit instead of PN to decide if a recipe is a packagegroup
b4df005 tune-cortexa9.inc: add vfpv3 tunes
889a5cc mirrors/own-mirrors/sanity: Updates after npm fetcher addition
28d17cf npm.bbclass: Add npm class to match fetcher
bc5a1d1 base: Add nodejs-native dependency for npm:// urls
9d5483c meta-yocto: Rename to meta-poky to better match its purpose
ab3a718 adt-installer: Drop since its replaced by the extensible SDK
c1c6a9d sanity: Improve configuration upgrade capabilities (support meta-yocto -> poky transition)
2587101 image: Run do_rootfs_wicenv after do_image
e0fd964 bitbake: toaster: change 'delete layer' to 'remove layer'
6e82820 bitbake: toaster: rename 'run again' button
c8dd72c bitbake: toaster: fix banner after customimage package add
149f574 bitbake: toaster: custom breadcrumb for the default project
4a12865 bitbake: prserv: Add dump_db()
bdb51ab bitbake: toaster: remove custom images from Image Recipes
98d462c bitbake: toaster: show suffix for image files and basename for artifact files
88b5660 bitbake: toaster: add missing link to image recipe details
25b179d bitbake: toaster: adjust the search field width
a97081b bitbake: toaster: make 'configuration' the first tab
e1fc319 bitbake: toaster: link to configuration in all breadcrumbs
df2808f bitbake: toaster: reduce max height of modal dialogs
6c51f08 bitbake: toaster: disable add layer button on click
d4a663a bitbake: toaster: apply error class to name field
48f0ae2 bitbake: toaster: fix custom image name form
07eb4f2 bitbake: toaster: comment out project release change
12ade9b bitbake: fetch2/npm: Add mirroring support for npm fetcher
ca5b6d6 bitbake: fetch2/npm: Add npm fetcher
813bd1f bitbake: utils.py: Add sha1_file call
7bb9e8d signing-keys: Make signing keys the only publisher of keys
64ab17b systemd: Upgrade to 229
44248af harfbuzz: update to version 1.2.0
f4f5573 perf: add sysroot handling to subcmd
7a95c2c oeqa/selftest/buildoptions: build -minimal instead of -sato images
2980ac0 bitbake.conf: add findutils-native to ASSUME_PROVIDED
2e152ff findutils: upgrade to 4.6.0
951ce18 mesa: add missing space to RRECOMMENDS append
2305610 uclibc: Do not use immediate expansion operator
aab3900 security_flags: Disable ssp when compiling uclibc
afb954e rpm: fix building rpm 5 with internal beecrypt
069cdbe alsa-lib: topology: Add missing include sys/stat.h
b879aed libsdl2: Fix patch after upgrade
3d4f71d gstreamer1.0-libav_git: update 1.7.1 -> 1.7.2
9d83a3e gstreamer1.0-plugins-ugly_git: update 1.7.1 -> 1.7.2
6456a6f gstreamer1.0-plugins-bad_git: update 1.7.1 -> 1.7.2
821498f gstreamer1.0-plugins-good_git: update 1.7.1 -> 1.7.2
04e77c1 gstreamer1.0-plugins-base_git: update 1.7.1 -> 1.7.2
e67c91d gstreamer1.0_git: update 1.7.1 -> 1.7.2
ea8c34e libnewt: Fix build with PIE flags
66a833a pseudo: Fix build when security flags are enabled
91a1baa glibc: Upgrade to 2.23
c1f9507 no-static-libs: remove eglinfo
0ab67d6 freetype: use autotools instead of a manual do_configure
4883ccc classes/populate_sdk_ext: add a better config extension mechanism
524ee08 recipetool: create: improve CMake package mapping
7b6e5b0 recipetool: create: add additional extension mechanisms
b2d4472 devtool: modify: tweak help description for behaviour change
a8e0e5e devtool: deploy-target: preserve existing files
2059a34 devtool: undeploy-target: support undeploying all recipes
b95c72c devtool: deploy-target: write deployed files list to target
62989ef devtool: sdk-update: tweak command-line handling of updateserver
cada5a8 devtool: (un)deploy-target: add help descriptions
6bd88e6 scripts/lib/argparse_oe: tweak title above options
32ef523 devtool: categorise and order subcommands in help output
9f7df76 devtool: update-recipe: don't show workspace recipe warning if no update
51972ed devtool: reset: fix preserving patches/other files next to recipes
e54f9c1 devtool / recipetool: use common code for launching editor
dd35f69 devtool: minor fix for error message
41242a2 staging.bbclass: remove trail slash from SYSROOT_DESTDIR
aeb8964 terminal.bbclass: import oe.terminal for oe.terminal.prioritized()
bee556a recipe_sanity.bbclass: skip DataSmart in recipe_sanity_eh()
2d293bd image.bbclass: fix circular dependency when IMAGE_FSTYPES append hddimg
a332360 toolchain-scripts.bbclass: add three other path to PATH in env.sh
4d2910f libsoup-2.4: disable libsoup-gnome by default
619f6c6 libsoup-2.4: prevent PACKAGECONFIG dependant package renaming
13e726f libsoup-2.4: minor formatting improvements
dd0ef3c populate_sdk_ext.bbclass: Add SDK_RECRDEP_TASKS variable
4c5c40d devtool: Don't recursively look for .devtoolbase in --basepath
0220180 populate_sdk_ext: Don't ignore SDK_TARGETS value
8c0ba8d bitbake: toaster: toastergui Fix invalid char test and implementation
913e9b1 bitbake: toaster: PackagesTable show only installed packages
94bca58 bitbake: toaster: toastergui unit tests convert to use fixtures
8796ac8 bitbake: toaster: SoftwareRecipesTable apply default order_by
8469e58 bitbake: toaster: orm migrations Sort out migrations mess
78b6109 cml1/sstate: Fix missing getVar parameter
7e19f88 linux-yocto/4.1: capabilities backports
54bfbcc waf.bbclass: Remove --disable-static from EXTRA_OECONF
51fc304 gcc-5.3: backport fix for PR-target-65358
ed20c6c epiphany: Add libxml2-native to DEPENDS
2021f63 libsdl2: update to 2.0.4
947b3bf cmake: Update to 3.4.3.
4699483 sstate.bbclass: use oe.gpg_sign for gpg signing
db7c7c2 oe/gpg_sign: add 'passphrase' argument to detach_sign method
e845b75 sign_rpm.bbclass: do not store key details in signer instance
d5be866 oe/gpg_sign: add 'armor' argument to detach_sign()
03554b7 oe/gpg_sign: add verify() method
af7e516 ruby: break out ri-docs and rdoc into separate packages
8bcf139 insane.bbclass: print more info for build-deps and file-rdeps
5f3dfea curl: re-enable proxy support by default
1f61888 libtool: Don't hardcode grep paths
a3b996a cml1.bbclass: fix do_menuconfig
91bfe50 cups: upgrade to 2.1.3
eeac0a9 coreutils: upgrade to 8.25
01dc859 findutils: upgrade to 4.5.19
bf7d5f6 diffstat: upgrade to 1.61
247f3b4 grep: upgrade to 2.23
4e5e501 bitbake: data_smart: Drop default expand=False to getVarFlag [API change]
c7610aa bitbake: data_smart: Drop default expand=False to getVar [API change]
4f0ab27 bitbake: SignatureGeneratorBasic: make checksum cache file configurable
0cdf193 bitbake: MultiProcessCache: make cache filename configurable
ca552bb bitbake: FileChecksumCache: add get_checksums() method
8f61f2d bitbake: bb/runqueue: save task file dependency cache onto disk
5177b1e bitbake: SignatureGenerator: add method for saving the file checksum cache
97617fd bitbake: bb/cache: drop some unused arguments
5a87d8c bitbake: Allow Hob to run images on a custom simulator, other than qemu
7fc38ea gma500-gfx-check: Fix infinite calls to modprobe gma500_gfx
be7b52a pulseaudio: 6.0 -> 8.0
c52b8f6 alsa-plugins: 1.0.29 -> 1.1.0
a231a4e alsa-utils: 1.0.29 -> 1.1.0
1adbb73 alsa-tools: 1.0.29 -> 1.1.0
3a82e2e avahi: update to version 0.6.32
14daeb5 no-static-libs.inc: Add libcap-native
c001863 libsdl2: Fix build with static libraries disabled
a46dc87 uboot-inc: Backport patch to fix Beaglebone Black bootloader
c7355b9 busybox: drop patches that are not valid anymore
47d0119 pcmciautils: Update SRC_URI
f37ac5b debianutils: Upgrade 4.5.1 -> 4.7
adfcaf2 busybox: Add musl config for _git recipe
46824dc debianutils: Fix SRC_URI to use debian snapshot
3df8701 nfs-utils: bugfix: adjust name of statd service unit
c15bf55 musl: Upgrade to 1.1.13+
07e7879 dpkg: Update to 1.18.4
5794b56 glew: upgrade to 1.13.0.
aea0746 glew: rewrite to use upstream build system
0b1c324 socat: Fix build with musl
04c6a48 binutils: Fix useless rpaths QA warning
eb6d14e image/populate_sdk: separate variables to fix dependency
c9e5e34 gcc: Backport nios2 r31 fix
012460d sqlite3: update 3.10.2 -> 3.11.0
f770a6e insane: wrap autotools checks in inherits_class(autotools) checks
35011d9 cmake: don't inherit autotools
9cd64ed oeqa/selftest/bbtests: Test bitbake --setscene-only option
7e5b451 glew: don't put our CFLAGS into the pkgconfig file
b1145cc dbus: update large file patch
fad63e3 coreutils: fix problem with acl for 6.9 version
351039f gcc-4.9/5.3: Ignore -fdebug-prefix-map in producer string
7a11650 bitbake.conf: use target path as compile dir in debugging info
ef30119 glibc: Security fix CVE-2015-7547
c834ebc glibc: CVE-2015-8776
842177a glibc: CVE-2015-9761
efa1ae5 glibc: CVE-2015-8779
aefe1fa glibc: CVE-2015-8777.patch
152914f oeqa/parselogs: Whitelist dmi firmware failure message in 4.4 kernels
683ea31 rng-tools: Fix underquoted m4 and libgcrypt floating dependency
7a700f5 lib/qa.py: raise ValueError if file isn't an ELF
334e1b5 lib/oe/qa: ELFFile: check that a path is a file before opening it
11359e9 rng-tools: fix the build with musl
a258589 bitbake: bb.ui.knotty: prefix task messages with recipe/task
4bf8b21 bitbake: Move bb.{debug,note,..} into their own logging domain
3b35de3 layer.conf: Add gstreamer1.0-meta-base to SIGGEN_EXCLUDERECIPES_ABISAFE
14e9385 sstate: Add ca-certificates-native to postinst recipes list
73e53e4 nss: define RPATH variable for nss-native
6e4e9f7 Revert "lsbinitscripts: fix the path for mountpoint"
6db39e1 libunwind: Fix build on ppc
47896a7 dbus-glib: 0.104 -> 0.106
93d8fc1 conf/no-static-libs: add explicit rule for libical
637b44c runtime/systemd: Fix for boot time string parse error
ef5b8b4 security_flags: Add SECURITY_CFLAGS to TARGET_CC_ARCH for binutils
1387785 binutils: Use tip of 2.26 branch
da13f0b buildhistory.bbclass: remove out-dated information on request
a56da4a Remove obsolete references to exmap
8b21720 bitbake: knotty: Set exit failure code on runQueueTaskFailed events
a9223e2 bitbake: taskdata: Fix traceback issue with missing provider
7593756 bitbake: cooker: Improve cache handling
9cb38c1 poky: Disable static libs by default
f852014 bitbake.conf: Remove unhelpful default value for EXTRA_OEMAKE
b050c50 apmd: fix build with static libraries disabled
d585a71 oeqa: Update to handle domain specific references in build logs
9300749 libpng12: Handle no static libs
67ea65e ed_0.5: Handle --disable-static option
438d6d6 conf/distro/include: Add no-static-libs.inc
2eb19cc classes/buildhistory: fix for python function parsing change
1a3204c valgrind: Fix build with musl
e8b0da1 rpm: Fix build with musl
48144e0 gstreamer1.0-meta-base: Mark as machine specific due to COMBINED_FEATURES
ff8ca89 gdb-cross-canadian: Add missing virtual/* DEPENDS
120a160 e2fsprogs: Update to upstream version of a patch
5394ada gdb: Rationalise PACKAGECONFIG
ce0f8ab insane: Add --disable-static to UNKNOWN_CONFIGURE_WHITELIST
94abdb2 linux-yocto: Work around PAT issue on qemux86
6fb493a libgcrypt: update 1.6.4 -> 1.6.5
bf9ad22 musl: Upgrade to tip of tree
5d156bc oe-selftest: don't use specific tasks
80e8928 oe-selftest: pylinted wic tests
9b6dc9b wic-image-minimal: use uuid for root partition
ab7cb65 wic: fix processing of --use-uuid
51e0a8a oe-selftest: add new wic testcase
2100f82 wic-image-minimal: update .wks to boot by qemu
4b26601 wic-image-minimal: change IMAGE_FSTYPES
f799e21 oeqa/targetcontrol: support wic image type
7066f16 oeqa/targetcontrol: make ssh control optional
0ade658 qemurunner: add parameter to method 'start'
d083fec oe-selftest: remove unused parameter
c26a9c3 runqemu: support path/to/<image>-<machine>.wic
c7f0578 runqemu: don't set KERNEL for wic images
2c3a009 runqemu: add support for wic images
64d2f13 scripts/sstate-cache-management.sh: Change wording
6740dd5 qemu.inc: Add rng-tools to qemu images
ce3df21 rng-tools: Import recipe from meta-openembedded
36b43b2 lib/oe/terminal: set workdir for konsole terminal
03e1950 mmc-utils: upgrade to latest git version
b5b8003 ltp: Upgrade to 20160126 and fix build on musl
f6b3957 initscripts: start urandom after populate-volatiles
85ac8eb initscripts: populate-volatiles.sh: add mount-bind feature
be5b72c libdrm: don't detect components that have been disabled
5fc5996 buildhistory: Fix regex to handle versions without spaces
7c3d4c0 debian: Fix superfluous setting for RPROVIDES
2eba066 autotools: Fix interaction with bitbake -b
9c8fee9 autotools: Correct dependency search logic error
971fafb maintainers.inc: include libjpeg-turbo and mmc-utils
4e0b334 scripts/runqemu-internal: Work around qemux86 PAT bugs in linux 4.4.1
283a302 sanity: Bump minimum version to 1.29.0
1c2d632 bitbake: Bump version post release to 1.29.0
a12dcc4 base.bbclass: fix support for gitsm://
bc72f64 linux-yocto: Update SRCREV for genericx86* for 4.4
be89a1d linux-yocto: Update SRCREV for genericx86* for 4.1
4a8d20a poky: update qemu* to prefer 4.4 kernel
d255f4f linux-yocto/4.1: galileo backports and support
fdcb373 linux-yocto/4.1: update to v4.1.17
5688cab linux-yocto/4.4: update to v4.4.1
f9f93ae bitbake: cooker: gracefully shutdown parsers
1f7f077 bitbake: buildinfohelper: unset brbe variable when build finishes
9a6cb10 nativesdk-buildtools-perl-dummy.bb: Fix variable expansion in python code
5e978d7 classes/testsdk: do_testsdkext avoid STAGING_DIR/BASE_WORKDIR in PATH
f56e9aa freetype: update 2.6.2 -> 2.6.3
1ba1aa3 freetype: minor formatting improvements
0d5e611 piglit: upgrade SRCREV
72c6b62 libbsd: Security fix and update 0.8.2
78be954 gstreamer1.0-plugins-bad_git: fix gst_structure_get() etc compiler warnings
fdd8979 gstreamer1.0-plugins-good_git: fix gst_structure_get() compiler warning
a23a50e python-setuptools: Add python-compile on RDEPENDS
914ff14 qemu: Security fix CVE-2016-2198
0938353 qemu: Security fix CVE-2016-2197
1f3e1d1 curl: add PACKAGECONFIG options for less common / legacy protocols
19045ba toaster: tests Remove symlinks from toasteruitest folder
738a9b7 classes/sanity: check_perl_modules provide output when fail
e64ce73 oe-selftest: devtool: add another devtool add test
a5095d1 recipetool: create: set S when we set SRC_URI from local git repo
ca5a36c recipetool: create: convert http git URLs that don't end in .git but contain /git/
4c71afb recipetool: create: ensure URL parameters don't make it into the name
86f3464 devtool: add: fix adding from a local source directory
fa50153 devtool: modify: make -x the default behaviour
f767757 recipetool: create: determine name/version from github/bitbucket URLs
d94c7e3 recipetool: create: support cmake find_library directive
ddfe744 devtool: commit for extra tasks that modify source when extracting
e36cb6c classes/externalsrc: create symlinks for workdir and logs
20034c3 classes/externalsrc: disable rm_work when active
c38f253 uninative.bbclass: capture stdout/err from patchelf-uninative
9065222 db: update HOMEPAGE
f0d5478 mdadm: update to version 3.4
79d5041 iproute2: update to version 4.4.0
21e3b2a image_types_uboot: add cpio.gz.uboot to supported IMAGE_TYPES
6fab5fc recipetool.newappend: add -e/--edit argument
252f97e liburcu: Add nios2 support
e72ab70 strace: Fix build for arc, metag, nios2, or1k, tile
691277f udhcpc: specify full path for ip command calls
f141f0b alsa-lib: avoid including <sys/poll.h> directly
a1ad3d0 oprofile: Add nios2 support
fd7dd07 nspr: Add nios2 support
954dc45 guile: Fix nios2 support
611e3d8 binutils: Repair nios2 PLT and GP handling
027eac5 gstreamer1.0-meta-base: make gstreamer1.0-plugins-base-alsa conditional
056d82c curl: drop obsolete pkgconfig_fix.patch
0e62f01 iproute2: update to version 4.4.0
216e618 quota: update to version 4.03
25d2956 oeqa/selftest/sstatetests.py: check that PARALLEL_MAKE doesn't change signatures
2966016 bitbake.conf: remove unused ALLOWED_FLAGS
3bdeda5 libproxy: remove GPLv3 logic and spurious exports
86994fd libproxy: add PACKAGECONFIG control for gnome3
033d754 libproxy: replace PACKAGECONFIG equivalent with the real thing
e65a29e openssh: Properly skip ptrace test if tools are missing
e1a1e0b openssh: Fix regex that sets sftp-server path for tests
d7faf67 insane.bbclass: Support MicroBlaze with musl
9937c93 hdparm: Explicitly set EXTRA_OEMAKE as required
7475c4c qemu: Security fix CVE-2016-1568
4857511 xserver-xorg: Add PACKAGECONFIG for crypto libraries
34798fa mesa: upgrade 10.6.3 -> 11.1.1
7edea7c initrdscripts: fix mmc device as install target
c3ef2bb libsoup-2.4: Remove unnecessary gnutls dependency
04454b2 wpa-supplicant: Only depend on libgcrypt when needed
4de0ee6 systemd: Don't depend on gcrypt unnecessarily
0da96bf buildstats.bbclass: remove dead URL from comment
326592d Remove obsolete references to exmap
a0cc1c3 curl: update 7.47.0 -> 7.47.1
a0d3eb9 sign_package_feed.bbclass: fix task dependencies
8cb1e83 oe/gpg_sign: fix incorrect variable name
902a68f meta/conf/layer.conf: adapt to more flexible initramfs-framework RDEPENDS
5b2b343 tune-corei7.inc: tell qemu to emulate a matching processor
5b70ee4 pixz: fix upstream version check
62a6f97 webkitgtk: update to 2.10.7
1cd6912 libwnck3: update to 3.14.1
e53eef9 iso-codes: update to 3.65
30cf8aa bash-completion: fix upstream version check
8098256 gstreamer1.0: fix upstream check for unstable versions from git
c24b0ab ffmpeg: update to 2.8.6
9237097 python: merge python-elementtree into python-xml
5ac4172 piglit: add missing dependency on python-xml
4d3ca42 systemd: tighten timesyncd and journal-gateway user accounts
6be3031 systemd: extend PACKAGECONFIG flags
85728ec systemd: rename systemd-zsh to systemd-zsh-completion
22a2866 systemd: move some tools into systemd-extra-utils package
9909104 classes/useradd: handle whitespace only USERADD/GROUPADD/GROUPMEMS
e485686 systemd: realign packages list
41d0f83 systemd: move bash completion into separate package
9a80afd nettle.inc: drop duplicate LIC_FILES_CHKSUM and SRC_URI hashes
72ec267 gdb: drop unnecessary CC_FOR_BUILD etc exports
00d6b67 gdb: build fix for MIPS + musl libc
40e4e8c strace: build fix for MIPS + musl libc
299b426 uclibc: fetch from master branch not 1.0
4ac4d28 uclibc-ng: Bump up to 1.0.12 release
70bfd4c musl: Upgrade to tip of tree
d1496b4 e2fsprogs: Fix multiple xattr handling
9d4b526 cdrtools-native: Explicitly set EXTRA_OEMAKE as required
864797a oeqa/prservice: Fix whitespace problem
7cd8351 pseudo: uprev to 1.7.5
246b02e ptest-runner: Explicitly set EXTRA_OEMAKE as required
7932525 unzip: Explicitly set EXTRA_OEMAKE as required
4ef055c sysklogd: Explicitly set EXTRA_OEMAKE as required
625066b stat: Explicitly set EXTRA_OEMAKE as required
07e81c8 pigz: Explicitly set EXTRA_OEMAKE as required
936223b iputils: Explicitly set EXTRA_OEMAKE as required
1e3fdbb ed: Explicitly set EXTRA_OEMAKE as required
ef36b6f gptfdisk: Explicitly set EXTRA_OEMAKE as required
59ee206 dmidecode: Explicitly set EXTRA_OEMAKE as required
d17758a libacpi: Explicitly set EXTRA_OEMAKE as required
44e8d0f apmd: Explicitly set EXTRA_OEMAKE as required
961d898 perl: Explicitly set EXTRA_OEMAKE as required
ecb9c34 oeqa: Improve test failure messages
ae2f3a3 sstate: Ensure populate_lic sstate objects are cleaned
26f26e5 package_deb: Ensure allarch deb packages aren't target specific
b3a2065 base: Make do_cleansstate nostamp
37357ab classes/testimage: Fix exportTests function.
f895a61 classes/testsdk: Add help information on how to run tests.
e22fbce oeqa/sdkext/devtool.py: Add location test to ensure that devtool is the eSDK one.
92d0cc5 oeqa/sdkext: Add devtool basic tests for eSDK.
a619ea2 oeqa/oetest: Fix compatibility SDK tests using eSDK.
062dbd6 classes/populate_sdk_ext: Add SDK_EXT_TARGET_MANIFEST and SDK_EXT_HOST_MANIFEST
4cfdf17 testsdkext: Add skeleton for support Extensible SDK tests.
5580d7b classes/testsdk: Add compatibility SDK testsuite to eSDK
7181da7 oeqa/oetest: oeSDKTest when run a command redirect env output to null
f3c2ce2 classes/testsdk: Add function run_test_context
3577c35 oetest.py/TestContext: Move loadTests and runTests inside it.
8009418 testimage/testsdk: Move get test suites routine inside TestContext.
b588b80 testimage/testsdk: Modularize TestContext.
59791d1 toolchain-shar-extract.sh: Add proxy variable to new env.
abd8158 classes/testsdk: Add call to export_proxies on testsdkext.
42f2ac4 classes/testsdk: Add testsdkext task only install.
90590ab get_test_suites: Add sdkext type for load test suites.
2ecc319 populate_sdk_ext: Set TOOLCHAINEXT_OUTPUTNAME.
7b459be classes/testimage: Add default inherit for testsdk.
24326a9 classes/testsdk: Add new class testsdk.
3d1d30b testimage: Modularize helper functions for get test lists.
8b5ee36 bitbake.conf/base: Improve handling of SRCPV
947e526 oeqa: setup bitbake logger after tinfoil.shutdown
400f530 bitbake: build: Improve python execution tracebacks
aece748 bitbake: build/data: Don't expand python functions before execution [API change]
e39cfb1 bitbake: cooker: Don't expand python functions in variable dumps
f652b6b bitbake: data: Don't expand python functions for variable dependencies
d3e0c44 bitbake: data_smart: Avoid expanding anonymous python functions
e0eb2ea bitbake: toaster: models Remove manual transaction control from lsupdates
48622e1 bitbake: toaster: build section Improve display of builds when > 1 targets
4d0ba0f bitbake: toaster: templates make build data breadcrumb consistent
99184d7 bitbake: BBHandler/ast: Merge handMethod and handleMethodFlags
6ba69b4 bitbake: utils: Drop datastore function inspection during exception
f8a44b1 bitbake: cooker: extended dot styling
30c132b bitbake: toaster: Enable Image Customisation feature
5e14a8f bitbake: toaster: xhr_customrecipe_packages Add dependencies to included packages
749f5a6 bitbake: toaster: orm generate_recipe_content only exclude locale packages
6269411 bitbake: toaster: customrecipe page Add last successful build link and conditionals
8d5b61e bitbake: toaster: models Add update_package_list for CustomImageRecipe
86db0bd bitbake: toaster: orm Add last_updated field to CustomImageRecipe
18d8b17 bitbake: toaster: models add get_last_successful_built_target method
8885b7b bitbake: toaster: pkg_dependencies_popover just show direct dependencies
40f6eff bitbake: toaster: models add all_depends method for Package_DependencyManager
a8ab1c6 bitbake: toaster: buildinfohelper CustomImagePackage update dependency info
0fee829 bitbake: toaster: newcustomimage_modal add frontend name validation
cb6d290 bitbake: toaster: API CustomImageRecipe check the recipe name supplied is valid
5634a25 bitbake: toaster: views CustomRecipe API add size information to the package lists
6fbceb0 bitbake: toaster: models Invalidate ToasterTables cache when a m2m field changes
998f9af bitbake: toaster: customrecipe Add dependency tracking to package selection
9976e4f bitbake: toaster: tables move template logic into the pkg_add_rm_btn
d77c247 bitbake: toaster: CustomImageRecipe generate overwrite IMAGE_FEATURES
481dc11 bitbake: toaster: make locale packages uneditable in custom image page
a757d39 bitbake: toaster: include locale and packagegroup packages in custom image
baac458 bitbake: toaster: update custom image package table filters
efbffe3 bitbake: toaster: move recent builds query to model
b514785 bitbake: toaster: update customimagerecipe migration
df58f5b bitbake: toaster: add merge migration to resolve conflict
38f4913 bitbake: toaster: orm generate_recipe_file_contents Handler for require recipe
769017e bitbake: toaster: project builds Poll the server to get latest progress for build
971d65c bitbake: toaster: localhostbectrl Update the dirpath of customrecipe's base layer
6d9f342 bitbake: toaster: tables Check layer presence in project for customise_btn
76c0008 bitbake: toaster: toastergui tests Add addtional data to the setUp for new tables
70a078e bitbake: toaster: tables SelectPackagesTable rename recipe_id to custrecipeid
7e4c231 bitbake: toaster: toastergui tests Update package test to use CustomImagePackage
4b3c9d6 bitbake: toaster: customrecipe Add further front end features using new API
b213907 bitbake: toaster: xhr_customrecipe_packages add GET info for package response
a9668ee bitbake: toaster: xhr_customrecipe_id change to use CustomImagePackage
439314c bitbake: toaster: API allow CustomImageRecipe to be updated after creation
9ea4de6 bitbake: toaster: tables Change SelectPackagesTable to use ProjectPackage
20f400b bitbake: toaster: tables add recipe download link to CustomImagesTable
1c9ce1c bitbake: toaster: newcustomimage_modal use libtoaster method for new CustomRecipe
8b1d043 bitbake: toaster: libtoaster Add createCustomRecipe method
32048fa bitbake: toaster: orm Add convenience method to get all pkgs in a CustomImageRecipe
c80b7df bitbake: toaster: orm get_project_layer_versions to return layer_version objects
796e348 bitbake: toaster: toastergui tests Add unit test for download custom recipe
04d8c94 bitbake: toaster: toastergui tests Update to reflect changes to CustomImageRecipe
4e8a0aa bitbake: toaster: views xhr_customrecipe_packages clean up API
66b5608 bitbake: toaster: toastertable remove title from Show all in table
ce72896 bitbake: toaster: Add recipe details page
5f52614 bitbake: toaster: newcustomimage Move modal dialog out of newcustomimage template
2a3dd32 bitbake: toaster: Continue front end features to custom image recipe page.
d6e7e4a bitbake: toaster: tables Add table for Packages and update SelectPackagesTable
43f0a05 bitbake: toaster: views Add view to download custom recipe
2cf55af bitbake: toaster: move CustomImageRecipe generation to API entry point
c402ac2 bitbake: toaster: orm add CustomImageRecipe generate contents function
a6e4f94 bitbake: toaster: buildinfohelper Add the concept of CustomImagePackage
e1bfe1c bitbake: toaster: orm: Add db migration for new CustomImagePackage table
f760a78 bitbake: toaster: orm Add CustomImagePackage table
4117af2 bitbake: toaster: orm: Add db migration for new CustomImageRecipe inheritance change
1f10289 bitbake: toaster: orm make CustomImageRecipe inherit from Recipe
648753b bitbake: toaster: orm Add sum of dependencies size function to PackageDependencyManager
a92fc30 bitbake: toaster: tablejs Add an event handler to manually trigger a data reload
4c82878 bitbake: toaster: ToasterTables simplify filter function move common part to widget
3e1e8e6 bitbake: toaster: models fall back to a sensible string for no vcs reference
14d09c8 bitbake: toaster: localhostbecontroller CustomRecipe now base_recipe is Recipe
7d5d8d0 scripts/lib/bsp/engine: trailing whitespace cleanup
dfeda17 scripts/lib/bsp/engine: fix path separator
d482d84 maintainers: remove gtk-theme-torturer and gnome-mime-data
d0d85a4 bitbake: bb/fetch2: Move export_proxies function from wget to utils.
7226ce2 glibc-locale: fix QA warning
4a2f42f formfactor: add machconfig for Beaglebone
eb53c54 sstatetests: Fix after change to sstate populate_lic SWSPEC
a43b9ef gstreamer1.0-plugins-base: move freetype dependency into 1.6.3 recipe
fb4f05b gstreamer1.0-plugins-base_git: update to git master 1.7.1-79-g6414289
fc81c80 gstreamer1.0-plugins-bad_git: avoid including <sys/poll.h> directly
3f02474 gstreamer1.0-plugins-good_git: avoid including <sys/poll.h> directly
9b0a74a gstreamer1.0: avoid including <sys/poll.h> directly
f9e565e gmp_4.2.1: fix build for MIPS
6d570c8 gmp.inc: limit ARM_INSTRUCTION_SET over-rides to armv4/armv5
3aecdd9 gmp: move BBCLASSEXTEND = "native nativesdk" from gmp.inc into 6.1.0 recipe
263a65d gmp: move SRC_URI out of gmp.inc + minor reformatting
aacae25 image_types.bbclass: Embed IMAGE_NAME in ubinize config file
9c0d4ec toolchain-scripts: drop PYTHONHOME
6560f80 python: set PYTHONHOME for nativesdk
92ae4e2 gcc: musl related fixes for ppc/secure-plt and gthr
9e5222c gcc: Assume libssp and dl_iterate_phdr on musl
281bd41 security_flags: wipe security flags for gcc/glibc and related libraries
61a5875 security_flags: use -fstack-protector-strong
a07f2fd security_flags: ensure security flags only apply to target builds
8d57d1d gcc: Fix build on musl with -fstack-protector
eb134c6 isoimage-isohybrid.py: fix cpio working directory
8bedf76 glib-2.0: use the system libpcre
1ae132e libpcre: enable unicode properties by default
3adb8d5 python3: remove optimize by default patch
1df1ac9 security_flags.inc: don't do -pie for syslinux
562c75c neon: convert to PACKAGECONFIG
6228cf8 bitbake: toaster: reinstate ID on edit columns button
916c73d bitbake: cooker: shutdown cooker parser on shutdown
8857498 bitbake: fetch2/osc: Clean up old variable syntax
54da829 bitbake: fetch2/osc: Remove hardcoded url
c57ba52 cross-localedef-native: add ABI breaking glibc patch
0cc825f uninative: Improve error handling
576a248 patchelf: Add patch to handle large files
bbdbe00 package_manager.py: fix python indentation bug (opkg)
ea40a0b populate_sdk_ext: Make populate_sdk_ext depend on sdk_extra_conf
4f7656a populate_sdk_ext: Add support for a "minimal" type
71bb332 populate_sdk_ext: Don't set sdk_update_targets in the config
5b7a43e toolchain-scripts.bbclass: Use PYTHONPATH instead of PYTHONHOME
f1f8447 copy_buildsystem.py: Pass the nativelsb argument to gen-lockedsig-cache
b130805 gnome-mime-data: remove
12d5fa8 gtk-theme-torturer: remove from oe-core
659d755 openssl.inc: drop obsolete mtx-1 and mtx-2 over-rides
32b498c scripts/devtool: Add getVarFlag expand argument
ed5daa1 bitbake.conf/native/nativesdk: Set PKG_CONFIG_SYSTEM_ at top level
8fa2d52 pango: unset LDFLAGS when building gen_all_unicode
edfaa04 pango: merge bb and inc
00ccf51 e2fsprogs: Ensure we use the right mke2fs.conf when restoring from sstate
66a6ec2 nativesdk: Set PKG_CONFIG_SYSTEM_ variables
34e95b0 local.conf.sample.extended: Document HOW-TO enable systemd or busybox for init system
077d32e local.conf.sample: Remove trailing whitespaces
6ae662a bitbake: parse/ast: Mark anonymous functions as python functions
9913fd8 bitbake: codeparser: Improve handling of data.expand() dependencies
4628fe1 bitbake: lib/bb: Add expansion parameter to getVarFlag
b98866d bitbake: fetch2/gitsm: Fix when repository change submodules
390c2c1 bitbake: data_smart: Add missing expand parameter to getVar call
56454f6 bitbake: bitbake: prserv: do not clear umask when daemonizing
abf8a8f bitbake: bitbake: prserv: SIGTERM handling hung process
be032fc bitbake: bitbake: prserv: -wal and -shm sqlite lost when daemonizing
1e95ebd poky-tiny: Use musl for default system C library
6594bd5 maintainers.inc: Set me as Maintainer of QEMU.
86851d5 insane: Fix populate_sysroot sanity test path
d09a25e socat: upgrade to 1.7.3.1
fad264b libffi: move from recipes-gnome to recipes-support
d3753dd libffi: ensure sysroot paths are not in libffi.pc
c72614b syslinux: remove LDFLAGS manipulation
8ad11fc lttng-tools: Fix ptest installed la files
66ed16b gnutls: update 3.4.8 -> 3.4.9
149cb17 python-distutils: add missing dependency on python-email
3473962 nss-myhostname: Fix build on musl
42e37d7 linux-firmware: update to latest revision 52442afee
ce1bed7 license.bbclass: add LICENSE_CREATE_PACKAGE to perform_packagecopy vardeps
e43504b i2c-tools: point SRC_URI at Yocto source mirrors
2d7622c gnutls.inc: allow libidn support to be controlled via PACKAGECONFIG
60ebe1c gnutls.inc: add gmp to DEPENDS
935aa96 gnutls.inc: minor formatting improvements
3fa1c54 Revert "kernel/kernel-arch: Explicitly mapping between i386/x86_64 and x86 for kernel ARCH"
0b82af2 wic: isoimage-isohybrid: check for syslinux-native
9699441 formfactor: add machconfig for qemumips64
4701dc9 ncurses: use closing curly brackets in FILES_${PN}-tools variable
9d9f233 util-linux: Change ALTERNATIVE_PRIORITY above busybox
8f2306c mktemp: lower the priority of standalone mktemp package
6251846 libxsettings-client: drop obsolete disable_Os_option.patch
7894633 wic: default to empty bootloader config
090fb51 copy_buildsystem: add ability to exclude layers
8dc600f toaster.bbclass: reinstate scan for artifacts in the sdk directory
eee675b toaster.bbclass: attach image file scan postfunc to do_image_complete
0c0b072 meta: add ASSUME_PROVIDED dependency on wget-native for http fetches
f926610 gtk+3: Tweak getVar to use True, not 1
7fa6eeb classes/lib: Add expand parameter to getVarFlag
252e645 python-pycurl: remove unnecessary exports
9fd214d sstate: Fix SSTATE_SWSPEC only used by populate_lic tasks
4ea6a64 package.bbclass: Add data expansion to do_split_packages()
6ab5001 busybox/gtk/perl/base-passwd: Ensure data is correctly expanded
e8860f7 ref-manual: Fixed typo in FAQ 14.15 section.
9d2925e ref-manual: Updated FAQ entry regarding Proxy for SOCKS
29a44da ref-manual: Fixed typo in LICENSE_CREATE_PACKAGE variable description
4181e58 ref-manual: Updated warning regarding libexecdir
0d8bd7d ref-manual: Added description for LICENSE_CREATE_PACKAGE variable.
6aca5b8 ref-manual: Added remove-libtool class
5e2201e toaster-manual: Updated the "Installation" to have TOASTER_DIR information
3aa162a p11-kit: fix packaging warnings
60c9759 piglit: don't use /tmp to write generated sources to
b33e440 libical: Work around hardcoded paths in pkgconfig file
a131b6e documentation.conf: align the documentation for DEBUG_OPTIMIZATION and FULL_OPTIMIZATION with bitbake.conf
974a8c0 pciutils: Explicitly set EXTRA_OEMAKE as required
2d3e6f3 openssl: Explicitly set EXTRA_OEMAKE as required
b07e161 dbus: add user sessions support
877eae1 dbus: use ${systemd_system_unitdir}
6010088 populate_sdk_ext: Add SSTATE_MIRRORS to config blacklist
70ec867 insane: add test for -dev packaging containing real libraries
38d6f1f python3: set INSANE_SKIP as libpython3.so is a trampoline library
4ac4023 p11-kit: fix module packaging
9a27010 libnl: package the libnl-cli modules in libnl-cli
111af1d remove-libtool: add new class
333dce4 gtk-immodules-cache.bbclass: fix immodules-cache path
b1e41f4 Revert "matchbox-keyboard: export GTK_IM_MODULE_FILE location"
ac1f311 directfb: use Yocto source mirrors for SRC_URI
4d80f7a gcc-configure-common.inc: drop --enable-target-optspace from configure
654eddc machine/include: drop tune-cortexm*.inc and tune-cortexr4.inc
322015a liboil: drop recipe from oe-core
41d50f9 boost: Fix build on soft-float ABI arm systems
07a91a6 libnss-mdns: Check for nss.h before using
1b34f55 db: Use cross libtool
64089c6 libtool-cross: Unset pre|post dep objects
457f417 docbook-xsl-stylesheets: create a link for easy reference
1ba62f9 pth: Remove dead code
a4a5d1f3 bitbake: cooker, bitbake-worker: Fix spelling of "received"
8f6b9c7 bitbake: cooker: Only start as many parse threads as we need
602da7c bitbake: knotty: Don't show errors for universe provider issues
1dd2d76 linux-yocto: Adds new genericx86 and genericx86-64 SRCREVs for kernel 4.4
b8fa9d3 poky: Add poky-world-exclude.inc and add qwt-as
5503a22 sstate: Revert using -m option to tar in sstate
6023798 libarchive-native: Disable libxml2 support
b09b054 pcmciautils: Fix makefile race
89df5f1 binutils: Use target provided zlib
c85c54f binutils: Upgrade to 2.26
ba2fdcd native.bbclass: Set CXXFLAGS from BUILD_CXXFLAGS not BUILD_CFLAGS
2394b15 gstreamer1.0-plugins-base: Add video crop supporting when convert frame
2724908 gstreamer1.0-plugins-bad: Fix memory leak of navigation thread
db81fc9 lib/oe/package_manager: remove package feed lists
c43da12 externalsrc: use shared CONFIGURESTAMPFILE if B=S
c6b8227 Make sure that the directory for CONFIGURESTAMPFILE exists
ca06179 autotools.bbclass: use oe_runmake instead of ${MAKE}
f4f9f2f gcc, qemuppc: Explicitly disable forcing SPE flags
691f7e4 pango.inc: misc dependency fixes
70efb8d pango.inc: limit ptest specific do_compile_prepend to target builds
c1273d4 systemtap_git.inc: do not immediate expand SELECTED_OPTIMIZATION
e631be2 glibc.inc: do not immediate expand SELECTED_OPTIMIZATION
770d9ff mkelfimage: fix target cflags leaks to host
c936bf0 base: Move COMPATIBLE_MACHINE out the scope of SOURCE_MIRROR_FETCH
3072361 bitbake: bitbake: BBUIHelper: Remove function findServerDetails
28c041c bitbake: fetch2: Simplify logic in verify_checksum()
5375e64 bitbake: bitbake: Set process names to be meaningful
5b234d1 bitbake: utils: Add ability to change the process name
0b06924 bitbake: data.py: avoid double newlines at the end of functions in emit_var()
68600ae bitbake: build.py: minor shell_trap_code() formatting tweaks
423a264 conf/distro/poky.conf: use example.com for connectivity check
6c058ce curl: update 7.46.0 -> 7.47.0 ( CVE-2016-0754 CVE-2016-0755 )
adbe63d openssl: update 1.0.2e -> 1.0.2f ( CVE-2016-0701 CVE-2015-3197 )
85b6679 autotools.bbclass: don't create subshell to delete configure scripts
2f1bcc1 sstate: Add back packagedata on packagedata dependencies
346b225 libical: update to 2.0.0
b696bb3 kexec: package kdump init script/configuration file correctly
51cebbf connman: fix crash with iptables 1.6
7f54fab autotools_stage.bbclass: remove it
07c4bc1 gdb-common.inc: add PACKAGECONFIG for readline
5869e35 tzdata: update to 2016a
c9cc707 tzcode: update to 2016a
aff2f58 glibc-testing.inc: drop pruning of PATCH_GET from the testglibc script
dfb9d41 gcc-cross.inc: drop pruning of PATCH_GET from the testgcc script
9e7d929 bitbake.conf: stop exporting PATCH_GET = "0"
5410aff sstate: Improve handling of useradd dependencies
9823802 gtk-icon-utils-native: Drop problematic dependency
6c04e0d glib.inc: limit ARM_INSTRUCTION_SET over-rides to armv4/armv5
83476b5 glib-2.0: drop add-march-i486-into-CFLAGS-automatically.patch
fab76ae glib-2.0: refresh configure-libtool.patch
593dcd4 systemd: fix systemctl enable script for template units
3c90507 glib: use bash-completion.bbclass
d88ed5d kmod: use bash-completion.bbclass
0f3780c git: use bash-completion.bbclass
9d20661 util-linux: use bash-completion.bbclass
0e5b0bf dbus-glib: use bash-completion.bbclass
9cddc0a bash-completion.bbclass: add class
ddb786c bash-completion: move in recipe from meta-oe
74e2f68 ffmpeg: add a recipe, and remove the libav recipe
eb7e554 lib/oe/patch: Make GitApplyTree._applypatch() support read-only .git/hooks
3ed566e gcc: fix hidden weak symbols by removing buggy gcc patch
51d9ba6 dpkg: fix CVE-2015-0860
f80d16e qemu.bbclass: clarify QEMU_EXTRAOPTIONS
3dca294 pango.inc: drop obsolete dependency on qemu-native
a16e9a2f dbus: upgrade to 1.10.6
7081458 buildhistory: fix the check for existence of a git repo
d74325e connman: tidy up connman-conf usage
79f4495 connman-conf: convert to systemd oneshot
5c35883 bitbake-whatchanged: avoid double do_ task name prefix
7881c02 netbase: add ipv6 host to /etc/hosts
93fcee6 linux-yocto/4.4: CVEs and preempt-rt update
07c182f linux-yocto/4.1: update to 4.1.16
7003698 gstreamer1.0-plugins-bad: fix compiler warnings with -Os in 1.7.1
6e90145 gstreamer1.0-plugins-good: fix compiler warnings with -Os in 1.7.1
3cd70c8 libsoup-2.4: add glib-2.0-native dependency
d5b3b97 libtirpc: remove stray .orig file from Use-netbsd-queue.h.patch
209066c ptest-runner: Add ptest-runner_2.0 recipe.
4953e26 musl: Upgrade to tip of tree
52413d0 libdrm: Refresh patch to match upstream submission
66e215f fts: Correct LIC_FILES_CHKSUM
be4c446 pth: Delete
df95988 elfutils: Fix build with uclibc/musl
047ad2c grub: Backport fix for largefile detection/use
956be0c oeqa/runtime/rpm: be more verbose if test_rpm_query_nonroot fails
3b5288f libc-package.bbclass: add LOCALE_UTF8_IS_DEFAULT
4f3ef90 ref-manual: Updated the BBMASK variable description.
b2b7214 dev-manual: Restored ptest-runner2 to ptest-runner
d484e58 ref-manual: Removed obsolete do_deploy statement from "Shared State"
7705b87 toaster-manual: Updated instructions for production setup.
4b4a8a6 ref-manual: Updated the SDK figure.
d7481ce ref-manual: Added do_image and do_image_complete tasks
d39e9d1 ref-manual: Rewrite of "Image Generation" and devtool text.
1e7735e ref-manual, mega-manual: Updated the Image Creation figure
fded4fa ref-manual: Updated configuration of auto.conf in closer look
9f192c8 dev-manual: Updated the devtool help examples.
4bbd39d dev-manual: Grammar fix to kickstart section.
75078dd dev-manual: Updated wic reference section
9ed7881 poky-ent: Grouped Fedora perl packages for niceness
3ac0416 local.conf.sample.extended: Update the info about BBMASK
d61d290 bitbake: bitbake-user-manual-ref-variables: Update the help for BBMASK
a948f52 bitbake: cooker: Allow BBMASK to contain multiple regular expressions
e82101a bitbake: bitbake-user-manual-metadata: Updated 'dir' flag
100d6c2 bitbake: bitbake-user-manual: Updated the example BitBake directory
11be341 documentation.conf: Update the help for BBMASK
3d2c0f5 cmake: update to 3.4.2
4364850 at-spi2-core: update to 2.18.3
c763940 webkitgtk: update to 2.10.5
1e95815 libsecret: update to 0.18.4
9259a43 freetype: update to 2.6.2
5ec6dbb gdk-pixbuf: update to 2.32.3
9c84fbc glib-2.0: update to 2.46.2
bd7278c gtk+3: update to 3.18.6
d609cd5 gtk+: update to 2.24.29
6197313 gtk-icon-utils-native: update to 3.18.6
1556f0e libsoup-2.4: update to 2.52.2
dff038a waffle: update to 1.5.2
89bd19f vala: update to 0.30.0
6c02099 rxvt-unicode: update to 9.22
245af2b btrfs-tools: Disable backtrace on musl
fa01d37 bsd-headers: Fix LICENCE and dev package RDEPENDS
05e11a5 gdb: Fix build failures on musl
72c1aa2 ltp: Add rdep on ldd
1d0332d argp-standalone: Fix build when S != B
9f22898 bitbake: fetch2/wget: fallback to GET if HEAD is rejected in checkstatus()
d11cc29 busybox: fix stop -vs- start typo in rcS script
9f4b088 mtools: keep v3.9.9 recipe in sync with the v4.0.18 version
2c14be3 gen-lockedsig-cache: fix bad destination path joining
9dea876 distutils-common-base: do not set PACKAGES - use defaults from bitbake.conf
4ead707 insane: remove unused variable assignment
44e9c3b meta: fix capitalisation in Upstream-Status
06b4572 pixman: only check even upstream versions
0f74387 gcr: check only even upstream versions
a2848ee avahi: Add patch to fix Win10 mDNS issues
04ef34f xf86-input-libinput: initial add 0.16.0
8a2dfa1 image.bbclass: check INITRAMFS_MAXSIZE
962cc37 systemd: make TEST_DIR configurable
9967746 bind: update to 9.10.3-P3
cac47db uninative: handle UNINATIVE_URL being file:///
9995814 uninative: fix path to patchelf-uninative
2495dfa scripts/wipe-sysroot: also delete uninative sysroot
bb97157 meta/lib: new module for handling GPG signing
aadb879 devtool: extract: use the correct datastore for builddir
fa801e7 busybox: backport upstream truncate open mode fix
6996b26 gstreamer1.0-plugins-base.inc: drop obsolete dependency on liboil
1c4a8cc e2fsprogs: disable blkid
0de8766 pango.inc: drop obsolete FULL_OPTIMIZATION over-ride
89a7ed5 devtool: add configure-help subcommand
84720c8 devtool: properly handle bb.build.FuncFailed when extracting source
c3f0f7b devtool: add: warn if modified recipe found in attic directory
e559b66 devtool: build-image: allow specifying packages to add to image
e00eac8 devtool: move edit-recipe to a separate module
6720bda image: Don't create tasks with '.' in the name
88ca227 rootfs-postcommands: fix allow-empty-password on read-only rootfs
fdac363 kernel: Clean DEPLOYDIR before do_deploy runs
c2231de gcc-cross-canadian: Add missing DEPENDS on virtual/${HOST_PREFIX}gcc-crosssdk
5fdedb6 libtirpc: Drop unneeded xz-native dependency
7a98fb7 libuser: Drop unneeded xz-native dependency
72f98ba bitbake: toaster: Update UI test runner
c192bd6 Revert "xz: Allow to work with ASSUME_PROVIDED xz-native"
6df607b acpid: upgrade to 2.0.26
7a52f67 build-perf-test.sh: add eSDK testing
5c367ec build-perf-test.sh: more generic timing function
44fee2b python3-pip: Upgrade to 8.0.0
9d95a9d orc: update HOMEPAGE
0c1c93e gstreamer1.0-plugins.inc: drop obsolete ${S}/po/Makefile.in.in workaround
be145ad busybox: Add support for busybox-init
716fa93 pulseaudio.inc: drop obsolete dependency on liboil
55bfaa2 sqlite3: update 3.10.0 -> 3.10.2
6bb1dd1 sqlite3.inc: add PACKAGECONFIG to support building against libedit
39f6a9e sqlite3.inc: dynamically link the sqlite3 command-line utility
9b2835e sqlite: formatting improvements, move more stuff into sqlite3.inc
89ed462 sqlite3.inc: drop obsolete config_BUILD_CC, etc exports
6188419 sqlite3.inc: fix readline PACKAGECONFIG
939de8d sqlite3: fix the parallel build fix patch
a304b82 weston: Add missing DEPENDS on wayland-native
4a5458f bitbake: fetch2: Don't show checksum warnings if a single checksum was supplied
e66599f uninative: Fix conflicts with normal sysroot
4833bee insane: Drop do_stage test
861c916 populate_sdk: Use pixz instead of xz
a1c35f3 lib/oe/sdk: Partially revert "sdk.py: fix conflicts of packages"
29c5eda uninative: Add fetch capability
b54fa25 pixz: Add 1.0.6
d47572d xz: Allow to work with ASSUME_PROVIDED xz-native
0aeb33f lib/oe/package_manager: prevent testing an undefined variable
c1f4e92 recipetool: create: better fix for fetch error handling
10c8d14 recipetool: create: fix extraction of name from URLs ending in /
b307e0a recipetool: create: extract SRC_URI from local git repositories
50e40fc devtool / recipetool: support specifying a subdirectory within the fetched source
7e1691d recipetool: create: strip quotes from values extracted from CMakeLists.txt
477fa84 gen-lockedsig-cache: copy correct native sstate into ext SDK
204e4ab toolchain-shar-extract.sh: improve behaviour when xz is not installed
979c8fb classes/populate_sdk*: add dependencies on script files
f220abc classes/populate_sdk_ext: drop ext-sdk-prepare.py when installing
b435225 devtool: add sdk-install subcommand
44d1a2a devtool: sdk-update: improve SDK update process robustness
3360baa devtool: sdk-update: improve temp directory handling
d193531 devtool: build: ensure pkgdata is written out
d3a4f72 classes/populate_sdk_ext: add option to bring in pkgdata for world
a9dfced linux-libc-headers: Port patches for linux-headers for musl
3cffa6d libsolv: Update to 0.6.17+
d9134cf glib-2.0: Fix locale location on musl
527cd95 syslinux: Set LD to avoid using build host ld
136db70 binutils: Fix gold linking errors due to unresolved R_ARM_MOVW_ABS_NC
704e342 puzzles: Silence warning on arm with clang
bee65f9 eglinfo: Fix build on raspberrypi
6296c0f mdadm: Fix build with musl
67eef11 gpgme: Define __error_t_defined on musl
368e838 console-tools: Fix header inclusion when not using glibc
5a8c935 uclibc: Update to 1.0.11
1113d58 unfs3: Depend on libtirpc when building on musl
2ecfc02 guile: Fix build with musl
2df08b8 bsd-headers: Package cdefs.h
29deaf0 musl: Create ld.so as a relative symlink
2d028b3 fts: Fix linker hash-style option
8dd1aa8 dosfstools: Correct cross-compile CFLAGS and fix build with musl
21550d1 nss: Undefine HAVE_SYS_CDEFS_H
92e6a7a apmd: Fix build with musl
5d661c5 pcmciautils: Fix parallel build and include sys/types.h
86795ff kexec-tools: Define _GNU_SOURCE for getting loff_t definition
ff8006f systemd: Skip parsing on musl based targets
f2856a1 oprofile: fix build with musl
226c450 portmap: Point to tirpc headers and libraries on musl
5512c2f nfs-utils: Disable tcp-wrappers for musl
06d0204 bsd-headers,musl: Add recipe for bsd missing features
c2c9202 tcf-agent: Implement canonicalize_file_name() for musl as well
f294813 chkconfig: Avoid using caddr_t
b2aca09 nspr: Drop older glibc code
c0976fc irda-utils: Fix header inclusions
a3f9721 iproute2: Fix build with musl
22333f0 libuser: Fix build when secure getenv is not there
ea9dc99 iputils: Use member based initialization for msghdr struct
b207868 pax: Fix build with musl
1076499 tar: Fix build for musl based targets
e451023 rt-tests: Fix build with non-gcc compilers
68da390 webkitgtk: Fix build with clang/musl
da81635 console-tools: Include sys/types.h for u_char and u_short defs
205a07a sysklogd: untangle header inclusion maze
9f40dba babeltrace: Add missing header for MAXNAMLEN define
2458850 libunwind: backtrace APIs are glibc specific
abdfacb apt: Add support for building for musl targets
ec187d3 puzzles: Zero'ise structs before use
3cd0a8c dpkg: Add musleabi to known architectures
aaa8516 xinetd: Fix build with musl
93fb408 watchdog: Fix build with musl
7509ffd gzip: Fix build with musl
1d28cbc directfb: Fix build with musl
7b6b312 net-tools: Link with libintl on uclibc
ee1bfdb parted: Fix build with uclibc
ed5da2a mtools: Fix build with uclibc
5384f08 gnutls: Link with libuargp on uclibc
493e557 guile: Fix build with uclibc
1636f6f packagegroup-self-hosted.bb: Move glibc-gconv-ibm850 to glibc only case
3e7d7ab util-linux: Fix ptest builds on musl
77825f8 gnutls: Link with libargp on musl and depend on argp-standalone
1a6fe71 argp-standalone: Add recipe
a7d780c gdk-pixbuf: Fix latent build issue exposed by musl
f2cf5d3 xserver-xorg: Fix build with musl
b8de631 libcgroup: Add dependency on fts when building on musl
87c3e98 connman: include config.h for HAVE_STRUCT_IN6_PKTINFO_IPI6_ADDR
cc55fc7 fts: Add recipe
6e3950b tcp-wrappers: Fix build with musl
68f88a5 ppp: Fix build with musl
4972edd blktrace: Include <sys/types.h> for dev_t
d629fa1 powertop: Include right headers for timeval struct
063dc38 update-alternatives: when warning about alt_link==alt_target, say what PN
6baafa1 python-setuptools: Unify and upgrade python-setuptools and python3-setuptools to 19.4
f0e500e gstreamer1.0-libav: update git recipe to 1.7.1
90cbdfb gstreamer1.0-plugins-ugly: update git recipe to 1.7.1
6752484 gstreamer1.0-plugins-bad: update git recipe to 1.7.1
ad8f201 gstreamer1.0-plugins-good: update git recipe to 1.7.1
2ca9f20 gstreamer1.0-plugins-base: update git recipe to 1.7.1
3c7f2b8 gstreamer1.0: update git recipe to 1.7.1
7c810d0 gstreamer1.0-libav: update 1.6.2 -> 1.6.3
a4b8e9a gstreamer1.0-plugins-ugly: update 1.6.2 -> 1.6.3
8170e06 gstreamer1.0-plugins-bad: update 1.6.2 -> 1.6.3
497ebc9 gstreamer1.0-plugins-good: update 1.6.2 -> 1.6.3
3d87902 gstreamer1.0-plugins-base: update 1.6.2 -> 1.6.3
1e256ee gstreamer1.0: update 1.6.2 -> 1.6.3
dacf2aa gst-plugins-package.inc: drop perl RDEPEND for XXX-apps packages
676275f gstreamer1.0-plugins.inc: don't set base SRC_URI via python
852f098 gstreamer1.0-plugins.inc: drop obsolete lib-link.m4 workaround
a32ac26 gstreamer1.0-plugins-bad.inc: update hls dependency gnutls -> nettle
97e0752 gstreamer1.0-plugins-bad.inc: don't set ${S} or apply version specific patch
78e9361 gstreamer1.0-plugins-good.inc: remove duplicate --disable-examples
0edabfd gstreamer1.0-plugins.inc: convert GSTREAMER_1_0_DEBUG to a PACKAGECONFIG
81cd227 gstreamer1.0-plugins.inc: add missing glib-2.0-native dependency
a0b1e66 gstreamer1.0.inc: add missing glib-2.0-native dependency
e5fb79d gstreamer1.0-rtsp-server.inc: minor formatting improvements
434aa8e gstreamer1.0-omx: minor formatting improvements + update HOMEPAGE
69bcd33 gstreamer1.0-libav: minor formatting improvements + update HOMEPAGE
1d6e61a gstreamer1.0-plugins-ugly: minor formatting improvements
c45ce26 gstreamer1.0-plugins-bad: minor formatting improvements
c1ea981 gstreamer1.0-plugins-good: minor formatting improvements
beb8091 gstreamer1.0-plugins-base: minor formatting improvements
61f30b4 gstreamer1.0-plugins.inc: minor formatting improvements
981145a gstreamer1.0: minor formatting improvements
9f1a943 gst-plugins-package.inc: minor formatting improvements
9e08b69 gst-player: minor formatting improvements
a8ed2c8 valgrind: remove unused valgrind-remove-rpath.patch
e24123d emptytest: exclude from world builds
6808035 build-appliance-image: bump version to 14.0.0
eb418c3 insane.bbclass: fix package_qa_walk()
e185004 insane.bbclass: print all the QA messages
95fa36e weston: upgrade 1.8.0 -> 1.9.0
1bc0c89 wayland: upgrade 1.8.1 -> 1.9.0
03dae8e glib-2.0: fix the ptest
68c5e6d insane.bbclass:buildpaths: ignore ipkg/dpkg's CONTROL dir
258676b sstate: display the sysroot name when cleaning for clarity
f35b2e2 bitbake: set default libexecdir to $prefix/libexec
40f0c2d gawk: fix libexecdir/libdir/BPN confusion
2458f41 mesa: update SRC_URI
fdb12f9 e2fsprogs: set PV to 1.42.99+1.43+git${SRCPV}
9cf1ec0 valgrind: avoid neon for targets which don't support it
b191f58 valgrind: re-enable ARM intdiv and vcvt_fixed_float_VFP tests
b0b3412 valgrind: let valgrind determine its own optimisation flags
92abb5f meta/files/toolchain-shar-relocate.sh: Detect different python binaries and select one that exists.
924e2c3 python-nose: upgrade to 1.3.7
02440b5 python-native: Make python-native also RPROVIDE python-unittest-native
b7ca05d linux-libc-headers: update to 4.4
f73ee59 libpng12: upgrade to 1.2.56
3a59486 libpng: upgrade to 1.6.21
63a49f8 libtirpc: remove redundant va_list patch
55a8df2 perl: Upgrade to 5.22.1
a840588 oeqa/selftest/signing: use temporary rpmdb
65c1de9 kexec-tools: inherit update-rc.d
ba837f1 autotools: don't output the full config.log on configure failure
3e3cb62 bitbake.conf: Remove horrible variable expansion hacks
b963efb mesa: add missing wayland-native build dependency
9dd6c81 maintainers.inc: Correct maintainership for several packages
bd1a534 bitbake: toaster: run bitbake server with --read option
76a281c bitbake: taskdata: add the ability to access world targets list
11a1f49 bitbake: cache.py: check existence before add to cachedata.rproviders
05c1775 bitbake: taskdata.py: add RuntimeProviders to close matches
cf9cb65 bitbake: data_smart: Don't show exceptions for EOL literals
b80219e udev: Add 2 patches to support 4.4 kernel
1013385 gcc-runtime.inc: provide libquadmath
60b237f kexec: update supported architecture list
92a0032 strace: update 4.10 -> 4.11
0aa8169 strace: fix ARCH definition in tests/Makefile
2408149 strace: remove need for git-version-gen script
9ca6a5f strace: fix --disable-aio configure option
dd90f32 strace: drop unnecessary dependency on acl
aadae7b libnewt: Fix linking error due to missing symbols
571289d lib/oe/package_manager.py: Remove list() from PkgsList class
6ebda8e lib/oe/rootfs: Use list_pkgs() instead of list()
03075f6 lib/oe/utils: Add function format_pkg_list()
c708411 lib/oe/package_manager: Add list_pkgs() to PkgsList class
113e136 python3: Minor upgrade 3.5.0 -> 3.5.1
918149d python-numpy: upgrade to 1.10.4
eae7584 swig: upgrade to 3.0.8
21f7677 python-scons: upgrade to 2.4.1
7721652 python-pycurl: upgrade to 7.21.5
2ef401f python-mako: upgrade to 1.0.3
2a608cc python-setuptools: Upgrade to 19.2
6395bc8 python3-setuptools: upgrade to 19.2
40738af python: Upgrade 2.7.9 -> 2.7.11
35855a0 wic: pylinted ksparser module
e3b3bcf wic: add help for 'include' command
bfaabe5 wic: move parts of canned .wks into common.wks.inc
50a3dc5 wic: implement search of includes
15ea180 wic: refactor get_boot_config
d304162 wic: ksparser: add support for include
3fc6aaa wic: do not remove build dir in source plugins
8d34eea wic: use unique partition number
43b4058 wic: move wks parsing code to KickStart._parse
3860640 nss: update to 3.21
ea39ad0 libjpeg-turbo: fix upstream version check (sort of)
48a8a89 libical: fix upstream version check
c6f71c5 gnutls: update to 3.4.8
7a80f84 sysstat: fix upstream version check
2aabf9a pbzip2: update to 1.1.13
77aee28 ncurses: fix upstream version check
56e4ff6 libsolv: fix upstream version check
d46bc77 e2fsprogs: fix upstream version check
0436e3f build-appliance-image: bump version to 14.0
a206a19 btrfs-tools: update to 4.4
a1790bc bootchart2: update to 0.14.8
68c7113 poky.conf: Delete BB_SIGNATURE_HANDLER settings
0916235 rpm: remove bashisms: [ x == x ] -> [ x = x ]
2dbd61f uclibc: remove a use of immediate expansion and oe_filter_out ()
32eeb00 gcc-runtime: switch to removal override syntax to modify CXXFLAGS
c886a78 bitbake: tests/codeparser.py: Add filename/lineno flags to test variable
f130033 bitbake: toaster: write variables to toaster.conf
1835768 sstate: replace verbose manifest removal with a single count
d4c721a libdrm: Upgrade 2.4.65 -> 2.4.66
b5508a8 slang: Add dependency on ncurses
27b2df2 valgrind: make it explicit that valgrind supports armv7a and above
5dc38a3 sign_rpm.bbclass: fix task dependencies
27c39c4 opkg-utils: store alternatives in nonarch_libdir
77fde15 security_flags.inc: remove obsolete workarounds for curl
31ce027 cups: update systemd support
a4b48c2 coreutils: Add xattr PACKAGECONFIG
7a0b1c1 oeqa/runtime/parselogs: use -F to search fixed strings for grep
b8e11e2 libinput: Upgrade 0.21.0 -> 1.1.4
a9f2e87 postinst-intercepts: always use set -e
de0848f maintainers: mark Khem as nominal owner for uclibc
3235f5e formfactor: remove unused beagleboard configuration
6c64700 alsa-state: remove beagleboard configuration
f0d47a6 bitbake: Revert "runqueue.py: Ensure one setscene function doesn't mask out another which needs to run"
9e867ef sstate: Add packagedata to list of tasks not to recurse
5e881c1 classes/populate_sdk_ext: fix task dependency regression
2e9f092 image: Handle image types containing '-' correctly
0612ca4 oe-selftest: devtool: fix test_devtool_add_library if python was built first
c1492c4 recipetool: create: add a couple more license checksums
2c8c9fe recipetool: create: add basic support for extracting dependencies from cmake
3eb397f recipetool: create: force GL libraries to virtual/*
726dbda recipetool: create: move dependency mapping code to RecipeHandler
788e4bb recipetool: create: fix overzealous mapping of git URLs
ece0a2e recipetool: create: support additional autoconf macros from autoconf-archive
903d471 recipetool: create: detect flex/bison dependency
a66f4ac recipetool: create: pick up boost macros in configure.ac
dbe91a3 recipetool: create: improve extraction of pkg-config / lib deps
e7bedb9 wic: rename kickstarter.py -> ksparser.py
3bb6ea6 wic: override ArgumentParser.error
d652203 wic: removed unused imports
d2090a6 wic: improve processing of parsing errors
1ed97cc wic: catch KickStartError
bda77fd wic: add custom exception KickStartError
ef211a5 bootimg/image-vm/image-live: Improve image dependencies
0910bc6 image: Always run do_rootfs_wicenv
12e37e7 selftest/buildhistory: Improve test to remove sources of error
05716dd bootimg/image: Enhance bootimg to respect RM_OLD_IMAGE
1c869a9 rootfs-postcommands: Ensure license manifests respect RM_OLD_IMAGE
d27491b image: Ensure we don't expand TMPDIR in image commands
ce8a206 image: Fix instability of do_image_* checksums
fb1654f image: Fix wic environment issues
1da8f52 insane: Start to clean up do_configure_qa code
dd28695 insane: Clean up horrible return value processing code
839fb18 e2fsprogs: fix PV
b1236dc e2fsprogs: add PACKAGECONFIG for fuse
f98e11c bitbake: toastergui: make artifact download more robust
68f3e1e bitbake: toasterui: log OSErrorException metadata events
fb94754 bitbake: toasterui: listen for bb.event.MetadataEvent
a2f23fa openssh: CVE-2016-1907
320a319 license.bbclass: fix license manifest
4339a82 wic/help.py: document requirements for valid fstab generation
d688df8 glib-2.0: add dependency glib-2.0-native back
76e35f1 kernel-yocto.bbclass: move do_kernel_link_vmlinux() into kernel.bbclass
d453fa1 kernel-yocto.bbclass: remove do_kernel_link_vmlinux from SRCTREECOVEREDTASKS
2b92f88 libarchive: Add bsdtar and bsdcpio packages
e246905 toaster.bbclass: Separate artifact dump from image file dump
4f481bc pax-utils: 1.0.5 -> 1.1.4
f9974f2 sqlite3: upgrade to version 3.10.0
cd7910d connman: upgrade to 1.31
b9169b7 python3: add missing dependency on PN-misc to PN-modules
4b4dea7 useradd-staticids.bbclass: Remove unnecessary spaces
4f2c352 useradd-staticids.bbclass: Read passwd/group files before parsing
4cbdb15 useradd-staticids.bbclass: Simplify the logic for when to add groups
b18e40c useradd-staticids.bbclass: Simplify some logic
b689aa0 useradd-staticids.bbclass: Make --no-user-group have effect
c03ea8d useradd-staticids.bbclass: Treat mutually exclusive options as such
af8b005 wic: get rid of 2 getters
2573e28 wic: get rid of set_size and set_source_file setters
5cd222b wic: get rid of get_rootfs and set_rootfs
4d5d5dd wic: get rid of get_timeout getter
26fb2a1 wic: adjust code for new data structure
c827238 wic: remove pykickstart code
c15ea82 wic: use new kickstart parser
f572f44 wic: add kickstart parser module
e5e1905 wic: add partition module
180f170 alsa-lib: 1.0.29 -> 1.1.0
a8c25af matchbox-keyboard: export GTK_IM_MODULE_FILE location
d75cb1f xf86-input-evdev: upgrade to 2.10.1
2283732 menu-cache: upgrade to 1.0.1
ec7e406 libxi: upgrade to 1.7.6
86f3f25 librsvg: upgrade to 2.40.13
72dd806 libgpg-error: upgrade to 1.21
3c02fe0 libevdev: upgrade to 1.4.6
33e9930 libcroco: upgrade to 0.6.11
5b63c44 gsettings-desktop-schemas: upgrade to 3.19.3
dfff167 gpgme: upgrade to 1.6.0
5abb691 u-boot: Update to 2016.01 release
e9280d1 linux-yocto: introduce v4.4 standard/preempt-rt/standard kernel
8c3276e e2fsprogs: 1.42.9 -> 1.43 (master)
b248e55 bitbake.conf: rename python-native-runtime
65d0bfc net-tools_1.60-26.bb: Fix do_patch dependency error
99923fc ncurses: 5.9 -> 6.0
44d283a autotools.bbclass: use relative path to run configure script
b2f1de3 glibc-initial.inc: use relative path to run configure
0fe6e2d bitbake: toaster: increase timeout
a5f34bc poky.ent: Added "perl-bignum" package for Fedora
afc6cba dev-manual: Updated "Running ptest" section
ec047ad yocto-project-qs: Updated the "Next Steps" section
57ddbe8 ref-manual: Removed all variables related to "QMAKE"
7814b33 ref-manual: Updates to cull out qt4 stuff.
bf81969 toaster-manual: Updates on how to start Toaster.
798e8b8 bitbake: toastergui: code formatting and clean-up
c4b5011 bitbake: toaster tests: fix Django tests for new ToasterTable pages
88a262c bitbake: toastergui: remove unused views and template code
059a274 bitbake: toastergui: fix error and warning counts for builds
4103e0c bitbake: toastergui: make "Apply" button state depend on filter range
6c2d88f bitbake: toastergui: mute label for filter actions with no records
f08730a bitbake: toastergui: set default visible and hideable columns
112f374 bitbake: toastergui: serialise decimals correctly
e024aab bitbake: toastergui: streamline construction of filter objects
fcb20f9 bitbake: toastergui: ensure filter_value updates
f9c46f5 bitbake: toastergui: don't hide all elements with .col class
eaae82a bitbake: toastergui: convert project builds page to ToasterTable
33b011c bitbake: toastergui: implement "today" and "yesterday" filters
f8d383d bitbake: toastergui: implement date range filters for builds
b929889 bitbake: toastergui: show recent builds on all builds page
1a4b203 bitbake: toastergui: switch off filter highlights when inactive
809046c bitbake: toastergui: refactor ToasterTable filtering
294579b bitbake: toastergui: convert all builds page to ToasterTable
6c12ca7 bitbake: toastergui: use event delegates for hover help elements
ef93dce bitbake: toastergui: switch projects/ view to ToasterTable
417f1d3 bitbake: toaster: check inferred file suffixes against list of known types
c02ee05 bitbake: toaster: move image file suffix list to model
d29e4cd bitbake: toastergui: use ToasterTable for projects page
b1256db openssh: update to 7.1p2
c0e9f2d kernel/kernel-arch: Explicitly mapping between i386/x86_64 and x86 for kernel ARCH
f8508de bitbake: Revert "fetch/git: Change to use clearer ssh url syntax for broken servers"
b567235 image/image-live: Add back IMAGE_TYPES_MASKED support
e914e2a image.bbclass: Handle image base type dependency properly
ad32f65 autoconf: add missing perl-module-file-find to RDEPENDS
d83dfe6 ca-certificates: update to 20160104
4440560 epiphany: upgrade to 3.18.3
dcf54b4 iso-codes: upgrade to 3.64
d7bee35 lighttpd: upgrade to 1.4.39
08c8923 libwebp: upgrade to 0.5.0
cf0aea7 classes/populate_sdk_ext: avoid unnecessary sstate being brought in
ea29bec insane/package: Fix cases where QA errors aren't fatal
2e620a4 classes/populate_sdk_ext: check that extensible SDK prepared correctly
4685c33 classes/buildhistory: save auto.conf and bblayers.conf for extensible SDK
39f6472 classes/populate_sdk_ext: support auto.conf
91877aa classes/populate_sdk_ext.bbclass: handle if local.conf doesn't end with a newline
764c927 util-linux: create util-linux-runuser iff pam in DISTRO_FEATURES
95dce70 rsync: 3.1.1 -> 3.1.2
38aa0fc less: 479 -> 481
4cb2269 iputils: s20121221 -> s20151218
fe47dd7 wget: 1.17 -> 1.17.1
79886e9 git: 2.5.0 -> 2.7.0
d3e16b8 file: 5.24 -> 5.25
3549abc autogen-native: 5.18.5 -> 5.18.6
fb14627 curl: upgrade to 7.46
eaf88d7 xz: upgrade to 5.2.2
8516ff7 sysstat: upgrade to 11.2.0
ae73be1 at: upgrade to 3.18
21efab7 kmod: upgrade to 22
c88efae resolvconf: upgrade to 1.78
6729889 pciutils: upgrade to 3.4.1
edd319c gnupg: 2.1.7 -> 2.1.10
78b58b8 help2man-native: 1.47.1 -> 1.47.3
ac0e0d5 man-pages: 4.02 -> 4.04
1e0cbb9 libgcrypt: 1.6.3 -> 1.6.4
372c23d xmlto: 0.0.26 -> 0.0.28
aaafe33 elfutils: 0.163 -> 0.164
38901a7 dhcp: 4.3.2 -> 4.3.3
ea05e05 image.bbclass: Unconditional includes of populate_sdk_ext fails
c08f272 tcmode-default.inc: Fix preferred provider nativesdk-sdk_prefix-libc-initial
5d2f783 dhcp: search libxml2 for bind
b69652d tzdata: remove bashism
7c7c249 harfbuzz: update 1.1.2 -> 1.1.3
84623dc libpostproc: duplicate armv7a over-rides for armv7ve
1744198 libav.inc: duplicate armv7a over-rides for armv7ve
102dfa1 gcc-configure-common.inc: duplicate armv7a over-ride for armv7ve
b08dfb5 subversion: Upgrade 1.9.2 -> 1.9.3
d6fae0c lttng-ust: Upgrade to 2.7.1
a9cc9b5 lttng-tools: Upgrade to 2.7.1
6b02575 lttng-modules: Upgrade to 2.7.1
a378430 gdb: upgrade to 7.10.1
92cc02f linux-yocto: Update Genericx86* BSPs to 4.1.15
da43a56 bitbake: Revert "fetch2/local.py: avoid using PREMIRROR"
96a34e7 conf/distro/poky-tiny: correctly disable python in opkg-utils
1724ffd bitbake: fetch2/git.py: Add missing "errno" module import.
74fa824 bitbake: bitbake: clean up stamp-base related codes
f3f769a local.conf.sample: add qemumips64
43328fe bitbake: runqueue: Fix setscene task dependencies
7b905ca bitbake: toaster: settings Add uid to the toaster cache dir
dff7a27 bitbake: toaster: show 'satisfied via' text for reverse deps
89f4932 bitbake: toaster: show 'satisfied via' text for build deps
febb898 bitbake: toaster: show list of provides for the recipe
2ff4ccb bitbake: buildinfohelper: add provides info to the db
16a81fb bitbake: toaster: add Provider model
6a28ed3 bitbake: buildinfohelper: use providermap
f2b7252 bitbake: cooker: add providermap to dep_tree
7e380d4 bitbake: taskdata: refactor get_providermap
46731da bitbake: main/runqueue: Add --setscene-only option to bitbake
34f8db9 update_font_cache: only scan system font directories
e5c011b Add "CVE:" tag to current patches in OE-core
f04fb88 scripts/create-pull-request: fix git request-pull syntax
928ceb6 qt4: fix-for-mips-n32.patch: remove it
c4a3258 util-linux: create util-linux-runuser package
554ca68 valgrind: include aarch64 in COMPATIBLE_HOST
0ce775a valgrind: update to 3.11.0
21a94f6 valgrind: don't restrict to armv7a
b8ebac9 DpkgRootfs: Fix logcheck_error false-positive when using multilib
e265fbb package_deb.bbclass: add 'Multi-Arch: foreign' tag to allarch packages
4aeb69d package_manager.py: fixes for multilib deb packaging builds
9ea7428 package_deb.bbclass, cross-canadian.bbclass: DPKG_ARCH mapping function
72e6932 connman.inc: add missing RDEPENDS
675ff42 meta: rename perl-native-runtime
3f4fb39 dbus: support large-file for stat64
0d5e41f freetype: enable out-of-tree builds, and use host zlib
8f2ab19 bluez5: upgrade to 5.37
11f5a42 cogl-1.0: fix may be used uninitialized error
235606f oeqa/runtime/logrotate: fix hardcoded root directory
cce6c3e oeqa/runtime/smart: fix hardcoded root directory
cd2cf1f boost: update to 1.60.0
afc0255 bitbake.conf: remove 'stamp-base'
c8fef7f gcc5: Fix build on NIOS2
eda3947 rpmresolve.c: Fix unfreed pointers that keep DB opened
3c8a451 tzdata: Make /etc/timezone optional
b80da02 systemd: arrange for volatile /etc/resolv.conf
5548a76 systemd: add myhostname to nsswitch.conf
d6bc841 opkg-utils: add update-alternatives PACKAGECONFIG
c3b96ff linux-dtb.inc: use absolute upd-alt paths
3ad08c0 uclibc: Upgrade to 1.0.10
74c3667 populate_sdk_ext: Pass excluded_targets as a list to prune_lockedsigs
e306d54 populate_sdk_ext: Change to include siginfo and non sstate task sigs
e1a558a populate_sdk: Switch from bzip2 to xz
3341f3f classes: Fix do_rootfs references
0a4e1f9 image: Create separate tasks for rootfs construction
fdced52 image: Move pre/post process commands to bbclass
cdc0aee image.bbclass: Separate out image generation into a new task, do_image
0269219 populate_sdk_ext: Use new --setscene-only option to bitbake instead of workarounds
1ee0842 sstatesig: Handle special case of gcc-source shared-workdir for printdiff
d93c212 bitbake.conf: add virtual/libiconv-native to ASSUME_PROVIDED
b2fe2a8 devtool: build: support using BBCLASSEXTENDed names
38ed039 devtool: reset: support recipes with BBCLASSEXTEND
532f429 devtool: refactor code for getting local recipe file
ec90168 devtool: add: support adding a native variant
99e3872 devtool: reset: do clean for multiple recipes at once with -a
5ef716c recipetool: create: support creating standalone native/nativesdk recipes
1e503c0 recipetool: create: lower case name when determining from filename
4deed25 devtool: sdk-update: add option to skip preparation step
d586a11 devtool: sdk-update: fix error checking
c1b7d83 devtool: sdk-update: fix metadata update step
efead10 devtool: sdk-update: fix not using updateserver config file option
9348c91 classes/populate_sdk_ext: disable signature warnings
d44dcd7 classes/populate_sdk_ext: fix cascading from preparation failure
d11051c scripts/oe-publish-sdk: add missing call to git update-server-info
fbc2147 libbsd: upgrade to 0.8.1
221d864 bitbake: fetch/git: Change to use clearer ssh url syntax for broken servers
46d62d0 bitbake: knotty: Use non-interactive mode as fallback for dumb terminals
bfa7859 bitbake: cooker: fix findFilesMatchingInDir documentation
3d42737 bitbake: cooker: use in instead of count
0e83229 maintainers.inc: remove x11vnc
d914c7f meta-yocto: drop qt4 references
0f3ad7c scripts/yocto-layer: Avoids duplication of "meta-" prefix
220ef32 poky-lsb/poky-tiny: update preferred kernel to 4.1
b82e228 yocto-bsp: remove 3.14 and 3.19 bbappends
685daeb conf/local.conf.sample: comment out ASSUME_PROVIDED=libsdl-native
2c5e7e0 image: Really remove lockfiles flag
a500e3a boost: ensure boost to remain an empty metapackage
b151506 image_types.bbclass: Rebuild when WICVARS change
eb4159c gccmakedep: fix buildpaths qa check
f54e53c bash: fix buildpaths qa check error
6d111c8 testimage: remove VNC test, x11vnc isn't in oe-core anymore
8bec5c5 x11vnc: remove all references to moved package
8f865e2 x11vnc: move recipe to meta-oe
ae1fc96 classes/buildhistory: actually use KiB in extensible SDK sizes files
84f66b5 x11vnc: move recipe to meta-oe
c44599d readline: move inputrc into readline
f29d642 tune-*: use mcpu instead of mtune for ARM tunes
c6a1991 arch-armv7ve: add tune include for armv7ve and use it from cortexa7 and cortexa15
21d61fa cortexa{7,15,17}: add VFPv4 tunes
7f2cb68 feature-arm-vfp.inc: Further simplify with TUNE_CCARGS_MFLOAT
e9b2ffc feature-arm-{neon,vfp}.inc: refactor and fix issues
45f726c arch-armv7a.inc: add vfpv4 support also to softfp and big endian tunes
ebe8358 arch-armv7a.inc: Fix PACKAGE_EXTRA_ARCHS for tune-armv7atb-vfpv3, tune-armv7atb-vfpv3d16, cortexa7thf-neon-vfpv4
9280a8e arch-armv5.inc: drop duplicate ARMPKGSFX_DSP and PACKAGE_EXTRA_ARCHS_tune-armv5tehf-vfp
46d6b0e arch-armv[456]*.inc: improve indentation like armv7a
860663a arm/arch-arm*, tune-cortexa*, tune-thunderx.inc, powerpc/arch-powerpc64.inc: Use normal assignment
8c483a1 arch-armv7a, tune-cortexa*: improve indentation
7498b91 arch-armv7a, tune-cortexa*: improve comment VFP -> HF
bb9b581 arch-armv7a: add missing space before ?=
15f8344 tune-cortexr4.inc: fix PACKAGE_EXTRA_ARCHS
e2736f7 sanity.bbclass: add more information to error message about TUNE_PKGARCH missing in PACKAGE_ARCHS
b68d947 mkefidisk.sh: add boot log on console
62d7c97 mkefidisk.sh: add startup script for automated boot
5aa3b93 oeqa/selftest/recipetool: update for libjpeg-turbo migration
ffa7469 libjpeg: Replace libjpeg with libjpeg-turbo
29d273f python3: fix installed-vs-shipped when 64bit + multilib
db7cee6 pulseaudio: add PACKAGECONFIG for lirc
b900ec8 sstate-sysroot-cruft.sh: Extend the whitelist
20843fa iptables: upgrade to 1.6.0
c2bda6c scripts/oe-selftest: Allow to run tests on random/all MACHINEs
8e1435e selftest: Added testcase decorators for 2 tests
32f332c oe-selftest: New option --list-tests
17d886b oe-selftest: Improved --list-classes when determining test names
4ec2da7 selftest: moved tc test_buildhistory_does_not_change_signatures
02d259c scripts/oe-selftest: Remove extra coverage data added to unittests
30c06a4 expat: CVE-2015-1283
315bdc8 packagegroup-core-x11-sato: enable pcmanfm on mips
a3e26f9 wic: rawcopy: Copy source file to build folder
d6e0da4 grub2: Fix CVE-2015-8370
bb663b0 systemd: enable compatibility libraries by default
3fea163 systemd: add more compression and importd PACKAGECONFIGs
d462b70 gcc-sanitizers: link directly against sysroot libstdc++
3eb6135 openjade: Fix build if not installing libtool .la files
6308c47 valgrind: Define __UCLIBC__ for uclibc based systems
3d19a1e security_flags.inc: disable -fstack-protector-XXX for valgrind
807ed8a meta/conf/layer.conf: bump layer version due to Qt4 removal
4fb3e05 packagegroup-core-lsb: treat qt4 packages same as qt3 packages
8b11ed8 qt4: remove recipes and classes
0baadc8 toaster-manual: Updates to toaster use chapter.
908bbff ref-manual: Updated the list of supported image types.
5d27451 dev-manual: Added the --configfile bootloader option.
7b3b1f9 dev-manual: Added three new wic option descriptions.
eeffa64 dev-manual: Added the --overhead-factor wic option description.
2beb19b dev-manual: Added the --extra-space wic option description.
95851df dev-manual: Added wic --notable option description.
88a2794 dev-manual:
8bdc707 sdk-manual: Initial Manual framework
f1f7625 bsp-guide: Updated the license statement.
6686a31 dev-manual: Correction to the KVM stuff in the runqemu commands.
ccc830d documentation: Prepare for 2.1 builds
7af9314 mega-manual: Added four new figures for GUI example.
f8185ff bitbake: ast: Add filename/lineno to mapped functions
a178c5a bitbake: main: kill server without queue setup
773700d bitbake: xmplrpc: split connect method
05b4fbc bitbake: uievent: refactor retry loop
ebc169c bitbake: uievent: get rid of EventHandler attribute
4e0de6e bitbake: uievent: add error to registerEventHandler return
01419d5 bitbake: cooker: add state.get_name method
763506d bitbake: fetch2/__init__.py: Add support for 7-Zip
f5bfc1c bitbake: utils: Remove double compile from better_compile
b4141f6 bitbake: fetch2/local.py: avoid using PREMIRROR
1ad3595 bitbake: siggen: Change exception note into a warning
4ba49ac bitbake: data: Drop misleading ExpansionError exception
2c94311 bitbake: cooker: Drop useless parsing exception
a16b543 bitbake: data: Pass lineno/filename data from build_dependencies
958f0ff bitbake: codeparser: Add support for correct linenumbers
db4376e udev-extraconf: introduce multiple blacklist files for more complex setups
a8fb429 uclibc: disable parallel builds
401c632 image: Condense do_rootfs function/flags
0051510 image/rootfs-postcommands: Separate out post rootfs commands to separate class
3428edd image: Remove pointless rootfs lock
eb5bb0e packagegroup-core-boot: replace busybox with variable
cc7bb6c initramfs-framework_1.0: replace busybox with variable
d9ffa59 core-image-minimal-initramfs: replace base-utils
9349f42 base-utils: flexible dependency for command utilities
c44b76a orc: Add missing PACKAGES_DYNAMIC
2cd061a bluez5: include the patch only for 5.36
4c35473 meta-yocto-bsp: remove 3.14 and 3.19 bbappends
6af8981 meta-yocto-bsp: Remove uvesafb (v86d) from generic x86 features
614e9ec qemu: add PACKAGECONFIG for Nettle crypto support
09705a4 oeqa/selftest: support sets in devtool comparisons
4b543f7 packagegroup-core-x11-sato: include pulseaudio-misc
23302ee devtool: use cp instead of shutil.copytree
d6e7b5b xorg-lib: allow native building without x11 DISTRO_FEATURES
4cba706 busybox: generalize recipe to work with arbitrary install directories
9d001ae cairo: update 1.14.4 -> 1.14.6
6d561fb libdrm: Upgrade to 2.4.65
0f516f0 image-vm.bbclass: uses IMAGE_LINK_NAME
c851096 image-live.bbclass: uses IMAGE_LINK_NAME
907b87d rpm: Generate per distribution and multilib macro files
c910789 package_manager.py: add debugging support for rpm scriptlet execution
8dd27ef xinput-calibrator: get screen geometry when calibrating
e8d36f4 scripts: hand the TEMPLATECONF local over to setup-builddir
0f4fb26 util-linux: Fix floating dependency upon 'readline'
2cb434a linux-firmware: package Broadcom BCM43340 firmware
f70d46f rpcbind: Fix build with libtirpc 1.0.1
866c693 libtirpc: upgrade to 1.0.1
5754b83 gstreamer1.0-libav: upgrade to version 1.6.2
6ac601f gstreamer1.0-rtsp-server: upgrade to version 1.6.2
3ac3d33 gstreamer1.0-plugins-ugly: upgrade to version 1.6.2
823b623 gstreamer1.0-plugins-bad: upgrade to version 1.6.2
6d13f30 gstreamer1.0-plugins-good: upgrade to version 1.6.2
05896a5 gstreamer1.0-plugins-base: upgrade to version 1.6.2
a8eb77b gstreamer1.0: upgrade to version 1.6.2
dd5756b mirrors: add archive.apache.org to Apache mirrors
cfbd804 guile: remove redundant replacement of .pc file
c2e8079 bind: 9.10.2-P4 -> 9.10.3-P2
7204a0f libsndfile1: enable FLAC/Ogg/Vorbis support
35bd254 buildhistory: improve support for extensible SDK
ea0abcd buildhistory: fix not recording SDK information
b6d191d scripts/oe-selftest: Add support for selftest log with timestamp
ab79287 selftest: Added MACHINE = "qemux86" to tests that use runqemu
b09080d ncurses: fixes wrong paths in BINCONFIG
8df88fb xcb: don't build-depend on python-native
d7759a5 tcmode-default: Use glibc for nativesdk version even on uclibc and musl
a7eadc3 qemu: upgrade to 2.5.0
9988ab3 webkitgtk: update to 2.10.4
cedb027 epiphany: update to 3.18.2
6e27dd8 libwebp: update to 0.4.4
efcf4b4 libsecret: update to 0.18.3
0112274 gnome-desktop3: update to 3.18.2
88a656e gcr: update to 3.18.0
883193a linux-yocto: remove 3.14 and 3.19 recipes
4487e3a kernel-yocto: fix checkout of bare-cloned kernel repositories
5161944 linux-yocto/4.1: update to v4.1.15
a462d16 linux-yocto-dev: bump to 4.4-rcX
862b3b3 lttng-modules: fix build issue against kernel 4.4
9563aa8 yaffs2: fix checkpoint functionality
cefc24d mobile-broadband-provider-info: update to tagged release 20151214
04aa27c icu: fix upstream version check
2865e5f btrfs-tools: update to 4.3.1
5beb3bc iso-codes: update to 3.63
503c08d kexec-tools: update to 2.0.11
4fa2e4b lighttpd: update to 1.4.38
f7a7796 tiff: update to 4.0.6
2498065 libassuan: update to 2.4.2
f2192fa msmtp: update to 1.6.3
7fc3066 liburcu: update to 0.9.1
10d14bc trace-cmd: update to 2.6
fc774e9 python3-pip: update to 7.1.2
c3330aa python-pexpect: update to 4.0.1
aa90b5d ifupdown: update to 0.8.2
4c98105 gptfdisk: update to 1.0.1
edde9af cryptodev: update to 1.8
9da9308 oe-selftest: devtool: add more explicit check for ls output
c2435b1 oe-selftest: add tests for simple devtool add / recipetool create URL case
8916731 recipetool: create: fix error when extracting source to a specified directory
fe28c25 recipetool: create: improve autotools support
498e483 devtool: sync: tweak help / messages
b272c51 devtool: reset: print message about leaving source tree behind
95a234e devtool: status: list recipe file within workspace if one exists
e116739 devtool: modify: default source tree path
110f433 devtool: add: allow specifying URL as positional argument
ceaa4bf devtool: add: figure out recipe name from recipetool
ee0d5a1 devtool: add: allow source tree to be omitted
0d8751f scripts/lib/argparse_oe: handle intermixing of optional positional arguments
1bd7793 devtool: update-recipe: use correct method to get bbappend filename
2074654 devtool: split out function for naming bbappend
6acbdc9 devtool: add: tweak help text
316b57b devtool: edit-recipe: add new subcommand
ebe5f0b recipetool: create: basic extraction of name/version from filename
db5f964 recipetool: create: support extracting name and version from build scripts
6a7661b recipetool: create: set up priority system for recipe handlers
38803e3 recipetool: create: detect when specified URL returns a web page
e78a039 recipetool: create: prevent attempting to unpack entire DL_DIR
e61645b recipetool: create: minor fix for potential issue in python handling
ae2141b recipetool: create: fix do_install handling for makefile-only software
c2f1742 recipetool: create: avoid traceback on fetch error
470f20b recipetool: create: handle https://....git URLs
8e0a84c scripts: print usage in argparse-using scripts when a command-line error occurs
548d433 directfb.inc: enable bfd linker workaround for all arm targets
2381f4a devtool: sdk-update: fix traceback without update server set
7540550 classes/populate_sdk_ext: error out of install if buildtools install fails
ecce3d3 classes/populate_sdk_ext: hide build configuration in devtool build* output
fd84d0f classes/base: don't print header if BUILDCFG_HEADER not set
a4f496a classes/populate_sdk_ext: use uninative to set NATIVELSBSTRING
a6f8a3f toaster.bbclass: fix TypeError when parsing build stats
937b7fd libxcb: Add a workaround for gcc5 bug on mips
86c8b8b flex: update to 2.6.0
dad130b opkg: upgrade to v0.3.1
d2b770c systemd: remove merge conflicts accidentally left in
ca69643 wic/help.py: document that mountpoint is optional for part command
5628dde pixman: check neon support via TUNE_FEATURES, not the _armv7a over-ride
9a74388 xdg-utils: Do not build the in-script documentation
520b37d gettext: Upgrade 0.19.4 -> 0.19.6
cae0e0f gcc-configure-common.inc: add gcc-runtime ABI fixes for armv7m and armv7r
cba8fb3 tune-cortexr4.inc: provide an _armv7r over-ride via MACHINEOVERRIDES
fd10723 tune-cortexm3.inc: provide an _armv7m over-ride via MACHINEOVERRIDES
b6fe440 feature-arm-thumb.inc: drop 'no-thumb-interwork' tuning feature
1d5a4cf feature-arm-thumb.inc: drop legacy _thumb and _thumb-interwork over-rides
ca64c16 feature-arm-thumb.inc: drop ARM -vs- thumb comments
95a79a5 rpm: Fix support for db5 and db6
75cec07 oe-buildenv-internal: fix return code
606c9e7 staging.bbclass: allow already-stripped to be skipped
647e0e4 buildhistory-collect-srcrevs: hide empty sections
d4b5a1f selftest/buildhistory.py: Test buildhistory does not change sigs
4b83f1f gcc5: Upgrade gcc-5.2 -> gcc-5.3
0381b78 bitbake: event/utils/methodpool: Add a cache of compiled code objects
c61c1eb bitbake: BBHandler: Improve IN_PYTHON_EOF handling
2a94194 bitbake.conf: Add filename and lineno to BB_SIGNATURE_EXCLUDE_FLAGS
5f40691 bitbake: toaster: remove 2 confusing parameters
3960b6e bitbake: toaster: move setting of default values
b194c0c bitbake: toaster: move startup checks to a better place
064d2c7 bitbake: toaster: remove 2 unused functions
c505f24 bitbake: toaster: remove addtoConfiguration function
c7e4404 bitbake: toaster: updated header of the toaster script
af34920 bitbake: toaster: add MANAGE variable
563b786 bitbake: toaster: remove unused variable
aa3cc12 bitbake: toaster: split long lines, add/remove whitespace
8e4acac bitbake: toaster: check if address:port is in use
847b935 bitbake: toaster: implement checksocket command
9f3681d buildstats-summary/toaster: Cope with removal of get_bn()
522dcaa bitbake: knotty: Improve exception error message
01d67bf bitbake: knotty: Fix row/column function return value issue
6c12efa bitbake: buildinfohelper: Update for buildstats layout change
28ea1a1 bitbake: fetch: use orig localpath when calling orig method
5cb6d83 bitbake: utils: Improve traceback from better_exec internal errors
0019edc bitbake: ast/event/utils: Improve tracebacks to include file and line numbers more correctly
b14ccb2 bitbake: runqueue: Add support for <task>- syntax
5069ab6 m4: Drop unused/unreferenced patch
d7e766b toaster: Update for buildstats changes
adfdca4 buildstats: Improve to add getrusage data and corrected IO stats
3187647 buildstats: Separate out the build and task data to allow improvements
38a2553 buildstats: Clean up e.data and bb.data references
7b1e48f buildstats: Drop get_bn/set_pn and just use BUILDNAME
7837162 buildstats: Drop disk data from buildstats
030c033 nativesdk-buildtools-perl-dummy: Bump PR
e6f2761 combo-layer: Stop using filterdiff
f1f3716 meta: more removals of redundant FILES_${PN}-dbg
5fb8fea clutter-gst-3.0: add dependency on libgudev
54f01ca systemd: Upgrade to 228
63bdadc uclibc: Switch to using uclibc-ng
0b5cddd cdrtools-native: update to 3.01 final
c4dfb92 grep: update to 2.22
d8608bc procps: update to 3.3.11
52f6a01 babeltrace: update to 1.3.1
0c705d6 powertop: update to 2.8
516d8c9 nfs-utils: update to 1.3.3
9c39a4f systemtap: update to 2.9
fef0ec6 kbd: update to 2.0.3
8668e17 gmp: update to 6.1.0
86e02d0 docbook-xsl-stylesheets: fix UPSTREAM_CHECK_REGEX
f065766 mtd-utils: update to 1.5.2
5d32aeb unfs3: update to r497
4e653b5 python-numpy: update to 1.10.1
90b7212 libxml-simple-perl: update to 2.22
689db13 dmidecode: update to 3.0
d301451 cpio: update to 2.12
2bea006 puzzles: update to current commit
2d04c83 gnutls: update to 3.4.7
cf1eb2b libidn: add native and nativesdk support
dd58b3b libpng: Update SRC_URI to use GENTOO_MIRROR
b763668 libpng12: Upgrade 1.2.54 -> 1.2.55
91c92fc libical: Upgrade 1.0.0 -> 1.0.1
5c6ff26 libxslt: use proper SRC_URI
a444eb5 kexec-tools: added the script kdump
be9f7f9 ltp: Upgrade 20150420 -> 20150903
81f1e41 musl: Update to latest 1.1.12 release
c529e66 util-linux: Upgrade to 2.27.1
bdbc5ee packagegroup-core-sdk: Disable sanitizers for uclibc
692853d libsolv: add new recipe
8bba7de curl: upgrade to 7.45
2e3a172 libsndfile1: 1.0.25 -> 1.0.26
df18352 wget: Upgrade 1.16.3 -> 1.17
81eb101 unifdef: upgrade to 2.11
19c76ad sstate-sysroot-cruft: Add php, python, lua, fontcache generated files to whitelist
f80f8ba oeqa/selftest: Added testcase decorators for 2 testcases
a5dd1dd uninative.bbclass: Choose the correct loader based on BUILD_ARCH
388e580 license: Fix BB_TASKDEPDATA references
f19e8de coreutils/procps: Revert priority change since coreutils > busybox
455ff32 meta: more removals of redundant FILES_${PN}-dbg
e0890b6 meta: Drop now pointless manual -dbg packaging
b7766e4 package: Add auto package splitting of .debug files
89f13c7 meta/conf/toasterconf.json: remove SDKMACHINE variable as it is no longer used
03d715e bitbake: toaster: tables Set a default order for the software recipes table
4ff0d60 bitbake: toaster: rework checking of Django version
4a78416 bitbake: toaster: monkey patch Queryset
c1c8eff bitbake: toaster: removed extra calls of migrate
507aafb bitbake: toaster: work around 'database is locked' error
322b470 bitbake: toaster: fixed format strings
84daa40 bitbake: toaster: use OneToOneField instead of ForeignKey
c464f34 bitbake: toaster: Amend regex for MySQL database URLs
f001a4a bitbake: toaster: Remove compatible_layerversions() method
0adffdf bitbake: toaster: Check Django version against toaster-requirements.txt
8d058cf bitbake: toaster: Update deprecated manage.py command
717c636 bitbake: toaster: Prevent deprecation warnings for RedirectView
0f602c1 bitbake: toaster: Update API used to make runbuilds methods run in transactions
93f5738 bitbake: toaster: rename get_query_set -> get_queryset
23c4806 bitbake: toaster: Start Django machinery for database access
7a0c45e bitbake: toaster: Create default project with get_or_create* method
9de8dfa bitbake: toaster: Fix references to app paths
535fc9b bitbake: toaster: Remove South migrations
8ca4664 bitbake: toaster: Upgrade to Django 1.8.6 and remove South
b322dec bitbake: toasterui: process SetBRBE event
0274b68 bitbake: toaster: trigger SetBRBE event
fdb8e74 bitbake: toaster: implement BitbakeController.triggerEvent
5de3800 bitbake: event: Fix subprocess event error traceback failures
0da1d71 nopackages: Add class for recipes which don't generate packages
5003d14 sstate: Ensure populate_lic dependencies are not followed
48aad51 populate_sdk_ext/sign_rpm/sign_package_feed: Add missing getVar parameter
98dcdcb autoconf: Disable macro which causes excessive delays when using dash as sh
28fa304 automake: Remove delays in configure scripts using automake
f5e681d site/common-linux: Add some macros to avoid sleeps during configure
93adf46 meta-yocto/conf/toasterconf.json: remove SDKMACHINE variable as it is no longer used
b3d6872 lttng-tools: Revert wrong enforcement of Python 3.0 use
2c11bdd attr: Add patch to account for use of internal glibc header
f1c034b libpam: Fix build with musl
33bab59 openssl: Add musl configuration support
c4207ee busybox: Add config for musl
083d9d1 gettext: Delete libintl.h and charset.alias
3a0797f sysvinit: Fix build with musl
fd21402 musl: Add recipe
781d34f mtools: Use proper glibc override to add glibc packages to recommendations
1b90d67 squashfs-tools: Define FNM_EXTMATCH if not defined
36a709a mtd-utils: Backport and create patches to support musl
41fd73f gdb: Fix build with musl
1ee97d8 autoconf: Add musl support
a2ea58b gcc: Add support for building musl configuration
37c74e2 gstreamer1.0: Split bash completion information into separate package
fc32a3b attr: add attr dependency to attr-ptest
9205f0a valgrind: import Debian link_tool patch for MIPS
c27bbb4 slang: update upstream URI to (official) jedsoft.org
21e35df subversion: update to 1.9.2
39260c3 json-c: add manual upstream version check
4ff0017 mirrors: replace references to archive.apache.org
1672a18 mobile-broadband-provider-info: update to current commit
b699b15 nspr: update to 4.11
dec8d20 python-setuptools: update to 18.7.1
b3535e2 openssl: update to 1.0.2e
fce2ee7 dropbear.inc: drop legacy CFLAGS and LD tweaks
f87063b dropbear: update 2015.70 -> 2015.71
a520495 texinfo: don't create dependency on INHERIT variable
2b2774b sudo: upgrade to 1.8.15
5eb0e90 linux-firmware: update to latest revision bbe4917
c147782 bluez5: upgrade to 5.36
64c3a09 sudo: remove libdir INSANE_SKIP
b407a80 libsdl: expand PACKAGECONFIG and enable native builds
39facf9 buildtools-tarball.bb: 32bit tools need pseudo 32bit library
bc26a7d rpm: fix file conflicts for MIPS64 N32
01c0285 rpm: Enable MIPS64 N32 transactions
a742586 bash: fix testcase run-coproc/run-execscript/run-test/run-heredoc failed
a6bb872 cpio: fix test case of symlink-bad-length
787d82b linux-libc-headers: update default KORG_ARCHIVE_COMPRESSION bz2 -> xz
94c0332 linux-libc-headers.inc: remove '-e MAKEFLAGS=' from EXTRA_OEMAKE
c7ad779 gcc-4.9: import patch fixing compilation in thumb mode
1260ded gcc-5.2: import patch fixing compilation in thumb mode
b4db53a dropbear: Upgrade 2015.68 -> 2015.70
e0162c1 gcc-cross-initial: make dependency on gnu-config-native and autoconf-native explicit
fccb128 weston-init: add a native systemd unit file
a1fa8d9 python: Fix cross compiling issue
c9fdc1b icu: Upgrade 55.1 -> 56.1
95909bc kernel.bbclass: drop unnecessary 'eval' from kernel_do_configure()
ec79a19 insane: in libdir test allow libraries in libexecdir
9c0186f rootfs.py: Change logic to uninstall packages
23083e7 oeqa/systemd: get runtest target boot time and log
c6330a2 oeqa/systemd: journalctl helper function
220a78b scripts: oe-selftest Added new features.
98d2485 oe-buildenv-internal: preserve existing BB_ENV_EXTRAWHITE
9cab798 toolchain-shar-extract.sh: fix ~ not working in path
f27401d nativesdk-buildtools-perl-dummy: properly set PACKAGE_ARCH
5e3e2e0 poky.conf: Bump for 2.1 development
7e8ff7b bitbake: toaster: toasterui Add ParseStarted/ParseProgress events to mask
f823601 build-appliance-image: Update to master head revision
992e577 linux-yocto: Update genericx86* BSPs to v4.1.13
b4f6950 cmake: Add nios2 support
27b9f04 boost: adjust hard-coded path after python3 upgrade
639cadd sdk.py / OpkgSdk: remove_packaging_data() after install
fd4894f devtool: extract: update SRCTREECOVEREDTASKS for kernel
34f1d81 devtool: extract: copy kernel config to srctree
6650357 lib/oe/package_manager: Introducing PACKAGE_FEED_BASE_PATHS/PACKAGE_FEED_ARCHS
d7baeb5 selftest/wic.py: Add test for custom bootloader config
8612f26 directdisk-bootloader-config.wks: Add example for custom bootloader config
c59dc3b wic/help.py: Document the new option "configfile"
7033873 wic: Allow to use a custom config for bootloaders
f95f729 wic/utils/misc.py: Added function to search for files in canned-wks
9773faa wic: Prepare wicboot to allow custom bootloader config
4515186 package_ipk: allow to specify OPKG_ARGS in local.conf
7cf7156 systemd.bbclass: Allow enabling of parameterised services
551cda0 base: check for existing prefix when expanding names in PACKAGECONFIG
c093fd8 linux-yocto/4.1: Fix kernel oops on qemuarm boot
cda3905 toolchain-shar-extract.sh: ensure cleaned environment will work for ext SDK
f9384b0 bitbake: knotty: Enforce terminal line limit to stop crazy scrolling
7a775a1 initramfs-framework: create directory /var/run
2861399 libpcre: drop UPSTREAM_CHECK_ variables
35c28e3 libpcre: upgrade to 8.38
d50ef65 libpng: update 1.6.19 -> 1.6.20 (CVE-2015-8126)
2b736f2 ghostscript: add dependency for pnglibconf.h
976f0e3 package_regex.inc: split the rest of the entries to their recipes
74bfa62 package_regex.inc: split entries which blacklist specific versions to their recipes
75c6929 package_regex.inc: split sourceforge related entries to their own recipes
cefeac2 package_regex.inc: split PyPi related entries to their own recipes
aa5df2a package_regex.inc: split Debian-related entries into their own recipes
12ba5cc package_regex.inc: split GITTAGREGEX entries into recipe files
642e92f package_regex.inc: split entries with odd-even versioning into their own recipes
96eac69 package_regex.inc: deprecate the file
b0bbea5 gstreamer: really fix the helper install race
b822216 neard: fix libdir/libexecdir confusion
cbfccc6 glibc: fix libdir/libexecdir path confusion
d0577f9 sudo: handle libexecdir != libdir/PN.
6f837cc util-linux: Add ptest
dbd02bd libav: Correctly handle prefix=""
fda9859 libav: Add PACKAGECONFIG options: avdevice, avfilter, avplay, gpl
7ba85f1 libav: Remove deprecated --disable-avserver
2739ed0 busybox: backport upstream fixes for unzip
6decbbb qt4-4.8.7: fix build for mips n32
f1e8938 gstreamer1.0: Convert tests and valgrind config opts to PACKAGECONFIGs
11b9524 cracklib: fix for base_libdir == libdir
d9f73ca libbsd: Upgrade to 0.8.0
10d6dc4 libcroco: Upgrade 0.6.8 -> 0.6.9
79b823a shared-mime-info: Upgrade 1.4 -> 1.5
f6ec8a4 xdg-utils: Upgrade to 1.1.1
a3f63f9 gsettings-desktop-schemas: Upgrade 2.16.1 -> 3.18.1
754f6b6 gnome-common: Upgrade 3.14.0 -> 3.18.0
75aba18 clutter-gtk-1.0: Upgrade 1.6.2 -> 1.6.6
c6a6212 clutter-gst-3.0: Upgrade 3.0.8 -> 3.0.14
2da6cd5 clutter-1.0: Upgrade 1.24.2
148c953 cogl-1.0: Upgrade 1.20.0 -> 1.22.0
f54d4e4 ghostscript: Add NIOS2 support
21ba42b harfbuzz: update 1.1.0 -> 1.1.2
058b91e xvideo-tests: move to the latest release
70d459c scripts/oe-pkgdata-util: sort the packages in list-pkg-files
80e3919 wic: insert local Python paths at front
9d788d7 toolchain-scripts.bbclass: unset command_not_found_handle
82ab99f waf.bbclass: remove unused parameter from get_waf_parallel_make()
68d3dfe toolchain-shar-extract.sh: proper fix for additional env setup scripts
0c5d239 base: Improve handling of switching virtual/x providers
3745479 bitbake: bitbake: rename REGEX, REGEX_URI, and GITTAGREGEX.
dd282d4 bitbake: toaster: return back 'New project' button
2a8e970 bitbake: toaster: tests Update UI tests to work with 2.0 changes
fe8a0a3 bitbake: toaster: tests Automated build-mode backend tests
0497b57 bitbake: toaster: unset environment variables
8b7a548 bitbake: toaster: get rid of complicated heuristics
556b8b6 bitbake: toaster: remove SDKMACHINE from project variables
4186f5b bitbake: toaster: stop using toaster-pre.conf
361faa3 bitbake: toaster: remove writeConfFile API
fcbba5a bitbake: toaster: set variables on bitbake server
993bc7e bitbake: toaster: implement BitbakeController.getVariable
53e981e bitbake: toaster: buildinfohelper Broaden the toaster created recipe data case
57e5f24 bitbake: toaster: do not create duplicate HelpText objects
4c1e5ec bitbake: toaster: remove usage of BUILD_MODE variable
9902895 bitbake: toaster: do not terminate bb server
58765a8 bitbake: toaster: remove stopBBServer API
95a3cf7 bitbake: toaster: reimplemented startBBServer method
76d53b5 bitbake: toaster: remove _setupBE function
87b2f95 bitbake: toaster: implement 'toaster restart-bitbake'
891484a bitbake: toaster: implement start_bitbake function
bf25471 bitbake: toaster: implement stop_bitbake function
7c2b225 bitbake: toaster: update brbe and project attributes
de812d0 bitbake: toaster: start 'manage.py runbuilds' in the script
28e8ccf bitbake: toaster: make runbuilds to loop
a3871a3 bitbake: toaster: use parent of the build dir
2a96d35 bitbake: toaster: check for toaster configuration later
d87a534 bitbake: toaster: remove unused variable
dc6a489 bitbake: toaster: change toasterconf.json logic to use TEMPLATECONF, like oe-setup-builddir
5a42c2d bitbake: toaster: run bitbake the same way
cac91db bitbake: toaster: set DATABASE_URL in toaster script
a464bf2 bitbake: toaster: implement get-dburl command
e473151 bitbake: toaster: don't allow to run toaster as a script
4de214f bitbake: lib/bb/utils: improve edit_bblayers_conf() handling of bblayers.conf formatting
0debb11 bitbake: lib/bb/utils: fix error in edit_metadata() when deleting first line
9d19dd9 bitbake: wget.py: parse only <a> tags
71ede7b bitbake: toaster: toastergui tests Add generic test for ToasterTables widget
34b22cf bitbake: toaster: tables Fix invalid field name on NewCustomImagesTable
1c59846 bitbake: toaster: tables Add default_orderby field where it was missing or unset
d82c541 bitbake: toaster: CustomImageRecipe add search_allowed_fields to this model
bdf6241 bitbake: toaster: machines table Fix missing layers information needed for filter
b90a8dc bitbake: toaster: tablejs Make sure click handlers consume click event
c075bcf bitbake: toaster: projectpage Make sure build targets are space separated
698c74c libsdl: remove redundant configure_tweak patch
35945fd iw: upgrade to version 4.3
15969ae gstreamer1.0-plugins-good: fix PACKAGECONFIG for gudev and add one for v4l2 and libv4l2
e601b38 gstreamer1.0-plugins-bad: fix dependencies for uvch264 PACKAGECONFIG
ddf2501 gudev: Add from meta-oe
e406fa8 lsb: fix installed-vs-shipped for mips
39ecdce rpm: fix for N32 MIPS64
09b4da6 glibc/0029-fix-getmnt-empty-lines.patch: fix getmntent()
1781a9a init-install-efi: fix script for eMMC installation
f808747 init-install-efi: fix script for gummiboot loader
2a55036 linux-firmware: rtl8192cx: Add latest available firmware
b60af3b libsdl2: add missing dependency on libxkbcommon for PACKAGECONFIG[wayland]
ed31874 libxml2: upgrade to 2.9.3
ecb1c71 libxml2: merge pointless bb/inc split
19a626d openssh: redesign ssh-agent.sh regression test case
81b59e7 gcr: Require x11 DISTRO_FEATURE
934e486 psplash: update to latest git version
ccb2a57 sysvinit-inittab: Add wrapper script to verify console exists
b7f610d linux-yocto/4.1: Bluetooth:Fix the connection fail of 6lowpan over BT LE
d08e761 linux-yocto-rt/4.1: update to -rt15
6aa464c linux-yocto/4.1: fsl-mpc8315e-rdb: Enable EEPROM
bd29006 linux-yocto/4.1: update to v4.1.13
5561407 uClibc: enable utmp for shadow compatibility
533fc01 glibc: Backported a patch to fix glibc's bug(18589)
598e372 ncurses: update SRC_URI
51b64ee openssl: enable parallel make
88e45cd busybox: enable resize applet
87de4a1 busybox: disable support for mounting NFS file systems on Linux < 2.6.23
73cc839 busybox: update 1.23.2 -> 1.24.1
f8ac408 busybox: re-order defconfig to align with busybox 1.24.1
3648a37 busybox.inc: remove '-e MAKEFLAGS=' from EXTRA_OEMAKE
bf28ea9 busybox.inc: set CC=${CC} via make command line
f21dce1 busybox.inc: fix CONFIG_EXTRA_CFLAGS configmangle
6167669 busybox.inc: don't set .config CROSS_COMPILER_PREFIX
e1ecccd busybox: move EXTRA_OEMAKE etc into busybox.inc
0e63300 busybox.inc: don't export EXTRA_OEMAKE
3735776 busybox_git: Enable getopt applet
b1774f4 harfbuzz: update 1.0.6 -> 1.1.0
31f803a sqlite3: update 3.9.0 -> 3.9.2
7e3474c readline: apply missing upstream patches
99b9d52 readline: prepare for readline6.3 upstream patches
e0b6d0c dbus: merge .bb and .inc
d99958a pulseaudio: Fix HDMI profile selection
2ba954f initscripts: hide the error in case system is not writeable
4ed84ff nativesdk-buildtools-perl-dummy: fix rebuilding when SDKMACHINE changes
b8fdd09 xf86-video-vmware: Add vmwgfx PACKAGECONFIG option
dfd5c4d pkgconfig: merge .bb and .inc
61c6887 pkgconfig: upgrade to version 0.29
744e89f ofono: upgrade to version 1.17
996f843 libxml2: remove legacy LDFLAGS += "-ldl" workaround
dedabc1 apr: fix LTFLAGS to make it work with ccache
9470956 iproute2: install bridge tool by default
1b8f6a2 lttng-tools: add libgcc to RDEPENDS
22dd6e7 lttng-tools: Upgrade to 2.7 release
ef73f21 lttng-tools: Drop unused patch
c375976 lttng-ust: Upgrade to 2.7 release
f5c1b57 lttng-modules: Upgrade to 2.7 release
8d708a5 libunistring: upgrade to version 0.9.6
f840e59 libtasn1: upgrade to 4.7
012ca02 wpa-supplicant: upgrade to 2.5
872e153 mesa: Make gl libraries RRECOMMEND mesa-megadriver
a62fa23 directfb.inc: force bfd linker for armv7a
9b075ca libpng12: update to 1.2.54
6d1eb34 libpng: update to 1.6.19
92a881f orc: update to 0.4.24
2f479b1 libpcap: update to 1.7.4
bd4058f apr-util: add missing RDEPENDS for ptest
1408642 iproute2: update to 4.3.0
e677c25 ruby-native: Depend on openssl-native
9e37812 db: fix race issue for libdb-6.0.la
c19036a pango: use ptest-gnome
43b29d9 gst-plugins-bad: improve FILES variables
9fc877f gstreamer1.0-plugins-base: add PACKAGECONFIG for libvisual
7a2bb0d python3: fix building nativesdk-python3
2268a70 python3: Upgrade from 3.4.3 to 3.5
ed8d1be python-git: Add missing dependency
dee2a8c guile, mailx, gcc, opensp, gstreamer1.0-libav, libunwind: disable thumb where it fails for qemuarm
c0b822f icu: force arm mode
f42ef3f rpcbind: Security Advisory - rpcbind - CVE-2015-7236
04034e7 subversion: fix CVE-2015-3187
f91aedf subversion: fix CVE-2015-3184
40cd228 oeqa/sshcontrol: don't source profile
d39192a oeqa/runtime/multilib: refactor ELF class extraction
cc34104 oe-selftest: Enable code coverage on unit tests
06859de meta/conf/machine: use ' inside quoted values
6be94ec runqemu-internal: Replace wacom-tablet with tablet for usbdevice
0cc3810 recipetool: make plugin registration function name consistent with devtool
b381f80 recipetool: add setvar subcommand
1fbd760 lib/oe/recipeutils: refactor patch_recipe_file() to use edit_metadata()
0b850cb devtool: clarify help text
5001f23 devtool: build: enable showing default task in help
f79022d devtool: build: use bbappend to set PARALLEL_MAKE
21481bc lib/oe/recipeutils: check in validate_pn() for names instead of filenames
671f41e devtool: ensure we change back to the original dir on error
74505b4 devtool: search: print SUMMARY value
3f46af2 devtool: drop unused plugin_init() functions
176211a devtool: package: use DEPLOY_DIR_<pkgtype> to get deploy directory
0fe7426 devtool: disable creating workspace for extract and search subcommands
a360fa7 lib/oe/patch: improve extraction of patch header
f79cc4d devtool: upgrade: provide a means to update the source branch
b4d4d21 devtool: upgrade: fetch remote repository before checking out new revision
9b7d45c devtool: upgrade: remove erroneous error when not renaming recipe
9a70444 devtool: upgrade: fix updating PV and SRCREV
6a52c73 devtool: upgrade: fix removing other recipes from workspace on reset
44ef78a devtool: include do_patch in SRCTREECOVEREDTASKS
804f5b8 image.py: avoid mkdir race when building multiple images
312862f package_manager.py: define info_dir and status_file when OPKGLIBDIR isn't the default
b00f734 image.py: Avoid creating empty .env file in _write_wic_env
a88505b lib/oe/terminal: use C locale when determining version
8d784ba toolchain-shar-extract.sh: Ensure it's ran in clean environment
7f3c20f toolchain-shar-extract.sh: do not allow $ in paths for ext SDK
2d21e5d create-pull-request: handle empty ODIR
c63b36f scripts/gen-lockedsig-cache: improve output
67af6d6 wic: exec_native_cmd: implement support for pseudo
8ffba25 toolchain-shar-relocate: don't assume last state of env_setup_script is good
b8ee7ae sanity: don't enforce DISPLAY for testimage
b364183 oeqa/qemurunner: pass nographic to runqemu if DISPLAY isn't set
46755cc base: add automatic dependency on lzip-native for .lz SRC_URI
6ea39c2 base: decode SRC_URI before adding implicit fetch dependencies
eded9c2 buildhistory.bbclass: support extending the content of the build history
d95df11 license.bbclass: Create image license manifest
efdab52 license.bbclass: Add function get_deployed_files
cc0d044 license.bbclass: Added function get_deployed_dependencies
d45e10e license.bbclass: Added get_boot_dependencies function
8b1e7bc license.bbclass: Split license create manifest
1a210e6 license.bbclass: Write recipeinfo file in license folder
74c7cd5 populate_sdk_ext.bbclass: Be more permissive on the name of the buildtools
5ba6382 populate_sdk_base: Add sysroot symlink check
7fed655 classes/populate_sdk_ext: fail if SDK_ARCH != BUILD_ARCH
2948169 classes/populate_sdk_ext: tweak reporting of workspace exclusion
28a2ea7 classes/populate_sdk_ext: make it clear when SDK installation has failed
124c6aa classes/populate_sdk_ext: tidy up preparation log file writing
d348624 boot-directdisk.bbclass: remove HDDIMG before create
03f15e5 sstate: Ensure siginfo and sig files are also touched
615ccae weston: Add PACKAGECONFIG option for colord CMS
cdad67c opkg: add cache filename length fixes
2ec77de openjade-native: statically link local libs
29747d4 sysklogd: inhibit updatercd for non-sysvinit
add3451 connman: depend on readline
7a557a2 latencytop: obey LDFLAGS
8aeec87 tcf-agent: obey LDFLAGS
9025d2e blkspace: fix ldflags for iowatcher
1732a8a bluez5: enable sysvinit support
160fdd8 sysprof: use packageconfig for the gui
425d020 mc: upgrade to 4.8.15
7386647 packagegroup-core-directfb: Don't depend on pango-modules
ac5ed8e xkeyboard-config: Upgrade 2.15 -> 2.16
3a71fab xkbcomp: Upgrade 1.3.0 -> 1.3.1
b7cb308 xinput: Upgrade 1.6.1 -> 1.6.2
05eca73 xf86-video-omap: Upgrade 0.4.3 -> 0.4.4
cfcc5e5 xf86-input-synaptics: Upgrade 1.8.2 -> 1.8.3
4c9256f xf86-input-evdev: Upgrade 2.9.2 -> 2.10.0
96ddcc5 xorg-driver-input: add xorg configuration to FILES
a1003f5 xserver-xorg: Upgrade 1.17.2 -> 1.18.0
a336b8a libxcb: Remove unused git-version of the recipe
05ba0db libxcb: Upgrade 1.11 -> 1.11.1
44233d3 pixman: Upgrade 0.32.6 -> 0.32.8
7ab0466 libxi: Upgrade 1.7.4 -> 1.7.5
63feef0 gtk-icon-utils-native: Upgrade 3.16.6 -> 3.18.2
38924d9 package_regex.inc: Add gtk-icon-utils-native
060b482 gtk+3: Upgrade 3.16.6 -> 3.18.2
4f3d2b3 adwaita-icon-theme: Upgrade 3.16.2.1 -> 3.18.0
c8849ac librsvg: Upgrade 2.40.10 -> 2.40.11
81769ca pango: add RPROVIDES for removed packages
c9b06f5 pango: Upgrade 1.36.8 -> 1.38.1
ced8d49 gdk-pixbuf: Upgrade 2.30.8 -> 2.32.1
918c773 libsoup-2.4: Upgrade 2.50.0 -> 2.52.1
5bd9305 at-spi2-atk: Upgrade 2.16.0 -> 2.18.1
8eb0c8f atk-spi2-core: Upgrade 2.16.0 -> 2.18.1
78130eb atk: Upgrade 2.16.0 -> 2.18.0
e7141ab glib-networking: Upgrade 2.44.0 -> 2.46.1
fcd7494 glib-2.0: build dependency cleanup
5357764 glib-2.0: Enable more tests while cross-compiling
1e271af glib-2.0: Upgrade 2.44.1 -> 2.46.1
bc1be07 qemu: Backport malloc-trace disabling
bca5a7a logrotate: do not move binary logrotate to /usr/bin
0069c0d systemd: drop unneeded $D check in prerm
cd1f2b4 systemd: chown hwdb.bin to root:root for do_rootfs
7ca8cd9 systemd: for valgrind, define VALGRIND=1
46fa8ab systemd: make coredump a PACKAGECONFIG
ac34784 systemd: add machine-id to conffiles
04937cc systemd: ignore .so filenames in systemd-doc
6821854 systemd: fix Upstream-Status tag
82107b1 mdadm: fix CFLAGS and ptest issues
d8adfd2 gcc-4.9: Fix various _FOR_BUILD and related variables
8ae27fa devtool: add sync command
6bfa1dc boost.inc: remove unused parameter from get_boost_parallel_make()
16d7bfd wireless-tools: remove unused files
ee923bf gstreamer1.0: fix install race
0ae52c8 gcc-multilib-config: make aarch64 support multilib
8514d21 libxml2: fix CVE-2015-7942 and CVE-2015-8035
e864f71 terminal: Open a new window instead of split on older tmux versions (<1.9)
5056581 flex: fix test-bison-yylval and test-bison-yylloc failed
c54540e gdbm 1.8.3: install libgdbm_compat
b9f87ed harfbuzz: update to 1.0.6
3f75537 ethtool: bump version to 4.2
9a4da3c openssl: fix ptest issues
9163a5d base-files: stage /etc/skel
d60c5ff mktemp: raise the priority to avoid conflicting with coreutils
b06eacd libunwind: fix build for qemuarm
c4acace gma500_gfx: Avoid inserting gma500_gfx module for certain devices
6c3f680 libsndfile: fix CVE-2014-9756
aa07eb1 python-pycurl: update version to 7.19.5.2
696aa7e rt-tests: upgrade to version 0.96
6ec7dc2 rpcbind: don't use '-w' for starting rpcbind
eddd88f libsecret: add dependency on intltool-native
2e8efb1 openssl: use subdir= instead of moving files in do_configure_prepend()
036d2dc openssl: sanity check that the bignum module is present
cf366d8 libsdl2: require GLES when building Wayland support
4b38be6 meta: add some missing Upstream-Status tags to patches
42c75cd weston: delete unused patch
521fac6 glibc: fix Upstream-Status tag
44a7bbc linux-firmware: package Broadcom BCM4339 firmware
f9d51cd libusb1: fix make install race
cb01f6d libusb1: upgrade from 1.0.19 to 1.0.20
b4e6f63 perl: fix spaces in brackets while using CC version
a59d019 u-boot: Update to 2015.10 release
e67c5b0 bitbake-prserv-tool: check file name
4e2c5e1 recipetool.append: don't choke on a trailing ; in a url
a35f79d yocto-bsp: Set SRCREV meta/machine revisions to AUTOREV
9d585b5 yocto-bsp: Set KTYPE to user selected base branch
1542c2a yocto-bsp: Typo on the file extension
f674ffa yocto-bsp: Avoid duplication of user patches ({{=machine}}-user-patches.scc)
49a465c package_manager.py: Delete installed_pkgs.txt file
ace895d rootfs.py: Stop using installed_pkgs.txt
ccb1616 lib/oe/distro_check: don't set empty proxy keys
8137a84 lib/oe/copy_buildsystem: Don't expand BB_TASKDEPDATA
a6c68d8 oeqa/selftest/sstatetests: prettier output for allarch test
92328b4 oeqa/selftest/signing: Added new test for signing sstate.
fbb03a8 oeqa/selftest/signing: New test for Signing packages in the package feeds.
13a4c38 qemu.bbclass: fix vardeps of QEMU_OPTIONS
51bd011 qemu.bbclass: correct the fsl ppc QEMU_EXTRAOPTIONS
753f31e autotools: Allow recipe-individual configure scripts
e281791 allarch: Force TARGET_*FLAGS variable values
e28e17e distro/maintainers.inc: include stress package details
76d2e46 image_types: improve wks path specification
70ae7a6 insane.bbclass: Avoid libdir QA check if PACKAGE_DEBUG_SPLIT_STYLE='debug-file-directory'
cf0dfdb classes/cpan-base: fix libdir for nativesdk
a205c4c bbclass: fix spelling mistakes
cf218e5 rootfs_*.bbclass: don't add BUILDNAME to do_rootfs vardepsexclude
7d8616c insane: Don't depend on BB_TASKDEPDATA
a9cc27e kernel: fix race condition between compile_kernelmodules and shared_workdir
fecb077 classes: Ensure pass setVar/setVarFlag strings, not integers
9167f20 classes/license: fix intermittent license collection warning
43c8867 classes/metadata_scm: fix git errors showing up on non-git repositories
59b27d5 sstate: respect GPG_BIN and GPG_HOME
4415dc5 archiver.bbclass: add bbappend when do_ar_recipe kernel and gcc packages
2f0ff3a archiver.bbclass: fix previous issue regarding work-shared for linux-yocto
0cc4eef waf.bbclass: filter out non -j from PARALLEL_MAKE
95719b0 ptest-gnome: extend EXTRA_OECONF in all builds, not just target
1b25a70 yocto-project-qs, ref-manual, poky.ent: CentOS Package updates
2e649d7 dev-manual: Updated runqemu command options list
bd62289 toaster-manual: Removed SDKMACHINE from the json file example.
c674cd7 ref-manual: Updated list of supported distros.
33d8cff ref-manual: Updated the GCC 5 migration section for 2.0
d9aabf9 gcc: Drop 4.8
2cb1aee layer.conf: Correct gcc-cross dependency
88f9310 bitbake: toaster: builds pages Fix the download cooker log link
d04af8b bitbake: toaster: project pages Link to image recipes table in notifications
70465c7 bitbake: toaster: tests: Re-write some cases to make them more maintainable
536b73f bitbake: data_smart: Only support lowercase OVERRIDES
fb01a66 bitbake: fetch2: Remove crazy code in unpack
7db88aa bitbake: parse: Don't try to expand __base_depends/__depends
4c04ce0 bitbake: cache: Don't try to expand __inherit_data
9d8e36a bitbake: toaster: localhostbectrl Pass DATABASE_URL in via the process environment
4677d8b bitbake: toaster: Remove the new-build-input button widget
55f4494 bitbake: toaster: projecttopbar Use the project in context to get num builds
e9d4962 bitbake: toaster: projectpage Disable/Enable build input if we have 0 layers
5fa4c73 bitbake: toaster: orm Fix get_number_of_builds to count all apart from IN_PROGRESS
c4032f4 bitbake: codeparser: Only load the codeparser cache once
e3b66c1 maintainers: mass reassign and cleanup
37ddd3e Revert "local.conf.sample: Disable image-prelink by default"
9cc221d yocto-bsp: Default kernel version to 4.1 on x86_64
7100c42 scripts: runqemu: remove QEMUARCH from help message
f47e4ad cairo: update 1.14.2 -> 1.14.4
603b4de cairo.inc: drop obsolete CFLAGS += "-ffat-lto-objects" workaround
e8833a6 cmake: update 3.3.1 -> 3.3.2
8b2b068 oe-selftest: add test for bitbake-layers show-recipes
480bbae oeqa/selftest/layerappend: fix test if build directory is not inside COREBASE
a301f6e oeqa/selftest/devtool: fix test if build directory is not inside COREBASE
fd6bf77 classes/distrodata: split SRC_URI properly before determining type
7cebff6 classes/buildhistory: split package history values only once
10fc534 conf/distro/include: drop old recipes from include files
37cfd80 gitignore: fix overzealous exclusion
1f6599b meta: Fix typos in Upstream-Status labels
7cace4c meta/conf/layer.conf: fix typo
ca8e1e5 texinfo-dummy-native: set SUMMARY instead of DESCRIPTION
64cd113 gstreamer1.0-meta-base: set SUMMARY instead of DESCRIPTION
1d42d59 mmc-utils: set SUMMARY instead of DESCRIPTION
6692540 swig: set SUMMARY instead of DESCRIPTION
47ae8eb alsa-plugins: set SUMMARY instead of DESCRIPTION
eac5fa9 tzcode-native: set SUMMARY instead of DESCRIPTION
0a30a1f linux-yocto.inc: set SUMMARY instead of DESCRIPTION
19e1a73 python-nose: add SUMMARY
b5f58c1 stress: add SUMMARY
5f9392a libunwind: add SUMMARY
1460e01 gptfdisk: add SUMMARY
0821c36 verify-homepage: fix recipe file selection
0c48921 verify-homepage: tidy up output and comments
0e348e7 verify-homepage: get expanded HOMEPAGE value
caaca00 verify-homepage: use scriptpath to find bitbake path
649b6bc libaio: don't disable linking to the system libraries
11a9c24 runqemu: don't specify IP when starting a VNC server
3b95964 qemurunner: Remove the timeout in run_serial
bbd6d07 libxslt: CVE-2015-7995
a0d2ea9 gstreamer1.0-rtsp-server: upgrade to version 1.6.1
2459ec2 gstreamer1.0-libav: upgrade to version 1.6.1
bce06e7 gstreamer1.0-plugins-ugly: upgrade to version 1.6.1
0ec3c62 gstreamer1.0-plugins-bad: upgrade to version 1.6.1
ba1bc63 gstreamer1.0-plugins-good: upgrade to version 1.6.1
4a55d12 gstreamer1.0-plugins-base: upgrade to version 1.6.1
8360f23 gstreamer1.0: upgrade to version 1.6.1
8800033 prelink: Fix various prelink issues on IA32, ARM, and MIPS.
920fb96 gcc: Update default Power GCC settings to use secure-plt
7b1763a glibc: Fix ld.so / prelink interface for ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA
e63e191 qemurunner: Enable timestamps on kernel boot-up
a1ca788 openssl: fix mips64 configure support
5a10a6f at: modify sources in do_patch
78e0598 unzip: rename patch to reflect CVE fix
b80935a readline: rename patch to contain CVE reference
86d84ff qemu: upgrade to 2.4.0.1
4f0d756 gnome-desktop-testing: fix ptest output format
834de84 default-distrovars: remove less from WHITELIST_GPL-3.0
29bba95 lsof: don't export EXTRA_OEMAKE
3d37768 cmake.bbclass: don't duplicate CMAKE_C_FLAGS in CMAKE_C_FLAGS_RELEASE
efc07c2 rm_work.bbclass: Exclude do_rootfs stamp removal
5f9d16b cairo: fix license for cairo-script-interpreter
7328479 openssh: enable X11Forwarding if distro feature x11 is set
adeb820 acpid: Upgrade to 2.0.25
781dfd8 libidn: 1.30 -> 1.32
351c69a sqlite: 3.8.10.2 -> 3.9.0
c0fe43c rt-tests: bump to v0.94
cf972f9 gbm: Fix "configure: error: gbm requires --enable-dri"
17733cc xinetd: install xinetd supported services configuration
aa1844e combo-layer: introduce ability to exclude component from mass update
dcc446f linux-dtb.inc: refactor common code to function get_real_dtb_path_in_kernel
af9c7a4 linux-dtb.inc: refactor common code to function normalize_dtb
7eecb81 linux-dtb.inc: explicit test for empty string not needed
54df911 ptest-runner: Allow running of specific tests
54325b2 oeqa/testimage: Add support for test folder export.
ecbe135 oeqa/runtime/multilib: run the arch tests on connmand not connman-applet
2d1071e oeqa/runtime: remove dmesg test
42a5378 nfs-utils/statd: fix a segfault
77e3246 qemu: enable user mode for mips64 and mips64el
70600fb gnupg: fix find-version for beta checking
ab123ef rpm: define EM_AARCH64 for debugedit
af8c136 xserver-xorg: drop empty ${PN}-security-policy package
b667067 xserver-xorg: add Xwayland RRECOMMENDS
80f4d71 weston: add a PACKAGECONFIG option for xwayland support
883ab0f systemd: make dbus an optional build time dependency
2c5047f weston: add PACKAGECONFIG to build with systemd-login support
65ffeb5 systemd: add PACKAGECONFIG to build with compatibility libraries
4b29c80 os-release: put double-quotes around variable contents
0f516a5 cpio: fix testcase symlink-bad-lengths [ LIN8-947 ]
bceb9cb cpio: Fix symlink-bad-length test for 64-bit [ LIN8-947 ] architectures.
0ff3fc7 gtk+3: fix ALTERNATIVE_PRIORITY conflict with gtk+
eca12a6 coreutils: fix ALTERNATIVE_PRIORITY conflict with procps and mktemp
8de5315 util-linux: fix ALTERNATIVE_PRIORITY conflict with ncurses procps and e2fsprogs
3befb43 console-tools: fix ALTERNATIVE_PRIORITY conflict with kbd
5385ea8 debianutils: fix ALTERNATIVE_PRIORITY conflict with which
3a0bd40 linux-dtb.inc: use same variable name DTB for all elements of KERNEL_DEVICETREE
a879312 linux-dtb.inc: remove unneeded 'cd'
a23d1ca webkitgtk: Add upstream patch to fix build problem
69836e8 python: don't append -D__SOFTFP__ to TARGET_CC_ARCH for armv6/armv7a
38d1d63 prexport.bbclass: avoid export for native and crosssdk
d3da006 recipes: add distro_features_check for some packages
63690f0 scons.bbclass: SCons packages don't require do_configure
bffdc65 busybox: Schedule mdev after mountall
13ce7c2 busybox: Fix mdev block device automounting
b09f0f2 libarchive: rename patch to reflect CVE
116360f binutils: Fix XLP / Octeon 3 instruction clash
fd4f4d2 binutils: Fix octeon3 disassembly patch
REVERT: b1f23d1 build-appliance-image: Update to jethro head revision
REVERT: 7fe17a2 qemu: Security fix CVE-2016-2198
REVERT: 50700a7 qemu: Security fix CVE-2016-2197
REVERT: 1f0e615 libgcrypt: Security fix CVE-2015-7511
REVERT: dc5f155 uclibc: Security fix CVE-2016-2225
REVERT: ef13511 uclibc: Security fix CVE-2016-2224
REVERT: ae57ea0 libbsd: Security fix CVE-2016-2090
REVERT: eb9666a glibc: Security fix CVE-2015-7547
REVERT: 5b12268 build-appliance-image: Update to jethro head revision
REVERT: a3a374a curl: Secuirty fix CVE-2016-0755
REVERT: f4341a9 curl: Security fix CVE-2016-0754
REVERT: 35f4306 nettle: Security fix CVE-2015-8804
REVERT: 3e8a07b nettle: Security fix CVE-2015-8803 and CVE-2015-8805
REVERT: 5ffc326 socat: Security fix CVE-2016-2217
REVERT: 5cc5f99 libpng: Security fix CVE-2015-8472
REVERT: 21a816c libpng: Security fix CVE-2015-8126
REVERT: 6a0fbfa foomatic-filters: Security fixes CVE-2015-8327
REVERT: d57aaf7 foomatic-filters: Security fix CVE-2015-8560
REVERT: 941874a build-appliance-image: Update to jethro head revision
REVERT: d74a3cb cross-localedef-native: add ABI breaking glibc patch
REVERT: 12fae23 build-appliance-image: Update to jethro head revision
REVERT: 67ac9d6 e2fsprogs: Ensure we use the right mke2fs.conf when restoring from sstate
REVERT: 5812fc9 build-appliance-image: Update to jethro head revision
REVERT: 3de2492 ref-manual: Updated host package install requirements CentOS
REVERT: 79de8cf toaster-manual: Updated the "Installation" to have TOASTER_DIR information
REVERT: a23d262 toaster-manual: Updated instructions for production setup.
REVERT: b6def81 linux-yocto: Update SRCREV for genericx86* for 4.1, fixes CVE-2016-0728
REVERT: db0f8ac linux-yocto: Update SRCREV for genericx86* for 3.19, fixes CVE-2016-0728
REVERT: c8122a0 linux-yocto: Update SRCREV for genericx86* for 3.14, fixes CVE-2016-0728
REVERT: cdeb241 meta-yocto-bsp: Remove uvesafb (v86d) from generic x86 features
REVERT: 52cd219 yocto-bsp: Set SRCREV meta/machine revisions to AUTOREV
REVERT: a88d6cb yocto-bsp: Set KTYPE to user selected base branch
REVERT: 4e74b36 yocto-bsp: Avoid duplication of user patches ({{=machine}}-user-patches.scc)
REVERT: 6680773 yocto-bsp: Default kernel version to 4.1 on x86_64
REVERT: 4c075e7 piglit: don't use /tmp to write generated sources to
REVERT: ee52ac6 gen-lockedsig-cache: fix bad destination path joining
REVERT: e9f95df linux-yocto: Update SRCREV for qemux86* for 4.1, fixes CVE-2016-0728
REVERT: e63bab1 linux-yocto: Update SRCREV for qemux86* for 3.19, fixes CVE-2016-0728
REVERT: 64a4920 linux-yocto: Update SRCREV for qemux86* for 3.14, fixes CVE-2016-0728
REVERT: 5b043da libpng12: update URL that no longer exists
REVERT: 655c8a5 libpng: update URL that no longer exists
REVERT: 96fda8c busybox: fix build of last applet
REVERT: ae037d9 ghostscript: add dependency for pnglibconf.h
REVERT: 26eb877 gcr: Require x11 DISTRO_FEATURE
REVERT: e632cdb uClibc: enable utmp for shadow compatibility
REVERT: e8c9613 git: Security fix CVE-2015-7545
REVERT: 108ea6d glibc-locale: fix QA warning
REVERT: 9a88c1d grub: Security fix CVE-2015-8370
REVERT: 443b09a gdk-pixbuf: Security fix CVE-2015-7674
REVERT: 6c91068 librsvg: Security fix CVE-2015-7558
REVERT: 9fd2349 bind: Security fix CVE-2015-8461
REVERT: 5a40d9f bind: Security fix CVE-2015-8000
REVERT: 1bbf183 libxml2: Security fix CVE-2015-8710
REVERT: 2ec6d1d libxml2: Security fix CVE-2015-8241
REVERT: 55aafb5 dpkg: Security fix CVE-2015-0860
REVERT: 029948b tzdata: update to 2016a
REVERT: 2bcf141 tzcode: update to 2016a
REVERT: cc3a391 kernel-yocto: fix checkout bare-cloned kernel repositories
REVERT: 049be17 libpcre: bug fixes include security
REVERT: 5e94ac7 qemu: Security fix CVE-2015-7295
REVERT: 7ee1828 qemu: Security fix CVE-2016-1568
REVERT: ca6ec2e qemu: Security fix CVE-2015-8345
REVERT: b55a677 qemu: Security fix CVE-2015-7512
REVERT: 4922f47 qemu: Security fix CVE-2015-7504
REVERT: 3ec0e95 qemu: Security fix CVE-2015-8504
REVERT: 942ce53 openssl: Security fix CVE-2016-0701
REVERT: ce8ae1c openssl: Security fix CVE-2015-3197
REVERT: 080e027 tiff: Security fix CVE-2015-8784
REVERT: c6ae9c1 tiff: Security fix CVE-2015-8781
REVERT: 049b7db bind: CVE-2015-8704 and CVE-2015-8705
REVERT: d632a92 rpmresolve.c: Fix unfreed pointers that keep DB opened
REVERT: 5b993ed openssh: CVE-2016-1907
REVERT: 27ee5b4 glibc: CVE-2015-8776
REVERT: a4134af glibc: CVE-2015-9761
REVERT: e10ec6f glibc: CVE-2015-8779
REVERT: a5a965d glibc: CVE-2015-8777.patch
REVERT: 2fb7ee2 bitbake: toaster: make runbuilds loop
REVERT: b9ad87b nativesdk-buildtools-perl-dummy: Bump PR
REVERT: 0a1c63a nativesdk-buildtools-perl-dummy: properly set PACKAGE_ARCH
REVERT: d4b400e nativesdk-buildtools-perl-dummy: fix rebuilding when SDKMACHINE changes
REVERT: 8c8c4ed Revert "gstreamer1.0-plugins-good.inc: add gudev back to PACKAGECONFIG"
REVERT: b832202 Revert "gstreamer: Deal with merge conflict which breaks systemd builds"
REVERT: dd0ba9e build-appliance-image: Update to jethro head revision
REVERT: 325d205 gstreamer: Deal with merge conflict which breaks systemd builds
REVERT: 53b114b build-appliance-image: Update to jethro head revision
REVERT: 02be35d poky.conf: Bump version for 2.0.1 jethro release
REVERT: f5551f8 ref-manual: Updated the list of supported image types.
REVERT: aa179ae dev-manual: Added three new wic option descriptions.
REVERT: 20007c8 dev-manual: Added the --overhead-factor wic option description.
REVERT: 2dd7f46 dev-manual: Added the --extra-space wic option description.
REVERT: 81cc737 dev-manual: Added wic --notable option description.
REVERT: 2b1dce5 dev-manual:
REVERT: a6f5293 kernel/kernel-arch: Explicitly mapping between i386/x86_64 and x86 for kernel ARCH
REVERT: e79a538 openssh: update to 7.1p2
REVERT: b171076 devtool: reset: do clean for multiple recipes at once with -a
REVERT: 255115f devtool: sdk-update: fix error checking
REVERT: 3f69105 devtool: sdk-update: fix metadata update step
REVERT: 5ba94af devtool: sdk-update: fix not using updateserver config file option
REVERT: d03d145 classes/populate_sdk_ext: disable signature warnings
REVERT: 00ff950 classes/populate_sdk_ext: fix cascading from preparation failure
REVERT: 22446c6 scripts/oe-publish-sdk: add missing call to git update-server-info
REVERT: 8597a61 devtool: use cp instead of shutil.copytree
REVERT: 95cc641 buildhistory: fix not recording SDK information
REVERT: 84d48ac recipetool: create: fix error when extracting source to a specified directory
REVERT: 4369329 recipetool: create: detect when specified URL returns a web page
REVERT: 4c3191f recipetool: create: prevent attempting to unpack entire DL_DIR
REVERT: caca77e recipetool: create: fix do_install handling for makefile-only software
REVERT: 383159e recipetool: create: avoid traceback on fetch error
REVERT: be40baa recipetool: create: handle https://....git URLs
REVERT: a897bfd devtool: sdk-update: fix traceback without update server set
REVERT: 9c4b61e classes/populate_sdk_ext: error out of install if buildtools install fails
REVERT: 4c07dd2 gstreamer1.0-plugins-good.inc: add gudev back to PACKAGECONFIG
REVERT: 83b72d8 linux-yocto: Update Genericx86* BSP to 4.1.15 kernel
REVERT: 44639bd libaio: don't disable linking to the system libraries
REVERT: a0be9bd linux-yocto/4.1: update to v4.1.15
REVERT: 53f0290 libxml2: security fix CVE-2015-5312
REVERT: f4b0c49 libxml2: security fix CVE-2015-8242
REVERT: fb409c9 libxml2: security fix CVE-2015-7500
REVERT: 55d097a libxml2: security fix CVE-2015-7499
REVERT: 8e6b2d6 libxml2: security fix CVE-2015-7497
REVERT: 332eb1d libxml2: security fix CVE-2015-7498
REVERT: cbc4e83 libxml2: security fix CVE-2015-8035
REVERT: c4b71e1 libxml2: security fix CVE-2015-7942
REVERT: fdea03d libxml2: security fix CVE-2015-8317
REVERT: 6fc1109 libxml2: security fix CVE-2015-7941
REVERT: 9eb4ce0 openssl: fix for CVE-2015-3195
REVERT: 6880f82 openssl: fix for CVE-2015-3194
REVERT: 7dcaa84 openssl: fix for CVE-2015-3193
REVERT: 435139b logrotate: do not move binary logrotate to /usr/bin
REVERT: 5f49c0a cairo: fix license for cairo-script-interpreter
REVERT: a29ec81 glibc: Fix ld.so / prelink interface for ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA
REVERT: b1e980f gcc: Update default Power GCC settings to use secure-plt
REVERT: ed82690 prelink: Fix various prelink issues on IA32, ARM, and MIPS.
REVERT: 9a620da autotools: Allow recipe-individual configure scripts
REVERT: f828071 toolchain-scripts.bbclass: unset command_not_found_handle
REVERT: 49858bd devtool: upgrade: fetch remote repository before checking out new revision
REVERT: d213452 devtool: upgrade: remove erroneous error when not renaming recipe
REVERT: fec97f6 devtool: upgrade: fix updating PV and SRCREV
REVERT: 3b4f659 devtool: upgrade: fix removing other recipes from workspace on reset
REVERT: 61a7de0 devtool: include do_patch in SRCTREECOVEREDTASKS
REVERT: 82c0072 toolchain-shar-extract.sh: do not allow $ in paths for ext SDK
REVERT: f181e72 scripts/gen-lockedsig-cache: improve output
REVERT: 4b5d4ca toolchain-shar-extract.sh: proper fix for additional env setup scripts
REVERT: d2ea8f1 toolchain-shar-relocate: don't assume last state of env_setup_script is good
REVERT: 02ef437 populate_sdk_ext.bbclass: Be more permissive on the name of the buildtools
REVERT: 3653b17 classes/populate_sdk_ext: fail if SDK_ARCH != BUILD_ARCH
REVERT: 8879571 classes/populate_sdk_ext: tweak reporting of workspace exclusion
REVERT: eeda3c6 classes/populate_sdk_ext: make it clear when SDK installation has failed
REVERT: dee9fbe classes/populate_sdk_ext: tidy up preparation log file writing
REVERT: d001d46 classes/license: fix intermittent license collection warning
REVERT: 777451c classes/metadata_scm: fix git errors showing up on non-git repositories
REVERT: cb0ca72 oeqa/selftest/layerappend: fix test if build directory is not inside COREBASE
REVERT: 8970ad6 oeqa/selftest/devtool: fix test if build directory is not inside COREBASE
REVERT: 4f7fdd0 classes/distrodata: split SRC_URI properly before determining type
REVERT: 3b7df55 uninative.bbclass: Choose the correct loader based on BUILD_ARCH
REVERT: f3d7c3f openssl: sanity check that the bignum module is present
REVERT: 96b1b5c glibc: Backported a patch to fix glibc's bug(18589)
REVERT: 7aecb57 directfb.inc: force bfd linker for armv7a
REVERT: 75ca2c8 texinfo: don't create dependency on INHERIT variable
REVERT: 02c7b3f package_manager.py: define info_dir and status_file when OPKGLIBDIR isn't the default
REVERT: 003c94f libsdl2: require GLES when building Wayland support
REVERT: ad6db01 gst-plugins-bad: add PACKAGECONFIGs for voamrwbenc, voaacenc, resindvd
REVERT: f0d87fe gstreamer1.0-plugins-good: fix PACKAGECONFIG for gudev and add one for v4l2 and libv4l2
REVERT: 35f34a6 gstreamer1.0-plugins-bad: fix dependencies for uvch264 PACKAGECONFIG
REVERT: 3b77e20 gstreamer1.0-plugins-{base,good}: update PACKAGECONFIGs
REVERT: e2d4412 libunwind: fix build for qemuarm
REVERT: ef69078 guile, mailx, gcc, opensp, gstreamer1.0-libav, libunwind: disable thumb where it fails for qemuarm
REVERT: 4700e40 icu: force arm mode
REVERT: 743ee04 libxcb: Add a workaround for gcc5 bug on mips
REVERT: 8a3deca bitbake: fetch: use orig localpath when calling orig method
REVERT: 0073b23 yocto-bsp: Typo on the file extension
REVERT: 71dbbcd bsp-guide: Updated the license statement.
REVERT: 41f1026 dev-manual: Correction to the KVM stuff in the runqemu commands.
REVERT: 38e3c6e mega-manual: Added four new figures for GUI example.
REVERT: b99ec28 poky.ent: Fixed POKYVERSION variable.
REVERT: c670dc7 yocto-project-qs, ref-manual, poky.ent: CentOS Package updates
REVERT: b968190 dev-manual: Updated runqemu command options list
REVERT: 1278753 toaster-manual: Removed SDKMACHINE from the json file example.
REVERT: 7b25b70 ref-manual: Updated list of supported distros.
REVERT: d9423fb ref-manual: Updated the GCC 5 migration section for 2.0
REVERT: 347347a bitbake: lib/bb/utils: improve edit_bblayers_conf() handling of bblayers.conf formatting
REVERT: 5935783 bitbake: lib/bb/utils: fix error in edit_metadata() when deleting first line
REVERT: 7fdad70 rpcbind: Security Advisory - rpcbind - CVE-2015-7236
REVERT: 0cb2fa5 subversion: fix CVE-2015-3187
REVERT: 5b52e9b subversion: fix CVE-2015-3184
REVERT: 59bdde4 linux-firmware: rtl8192cx: Add latest available firmware
REVERT: 8ad2bcc init-install-efi: fix script for gummiboot loader
REVERT: c3087bd init-install-efi: fix script for eMMC installation
REVERT: d2bf9fb pulseaudio: Fix HDMI profile selection
REVERT: 0556c58 allarch: Force TARGET_*FLAGS variable values
REVERT: e683dac libsndfile: fix CVE-2014-9756
REVERT: 092757e libxslt: CVE-2015-7995
REVERT: dab5555 unzip: rename patch to reflect CVE fix
REVERT: 1753d4a readline: rename patch to contain CVE reference
REVERT: 9dd3422 libarchive: rename patch to reflect CVE
REVERT: 1401976 binutils: Fix octeon3 disassembly patch
REVERT: a54a0db opkg: add cache filename length fixes

git-subtree-dir: yocto-poky
git-subtree-split: 8358e543ab95a1d2b1d19c1e944275daa17378c1
Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
diff --git a/yocto-poky/meta/classes/allarch.bbclass b/yocto-poky/meta/classes/allarch.bbclass
index 4af38d7..208cde6 100644
--- a/yocto-poky/meta/classes/allarch.bbclass
+++ b/yocto-poky/meta/classes/allarch.bbclass
@@ -1,5 +1,5 @@
 #
-# This class is used for architecture independent recipes/data files (usally scripts)
+# This class is used for architecture independent recipes/data files (usually scripts)
 #
 
 # Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
diff --git a/yocto-poky/meta/classes/archiver.bbclass b/yocto-poky/meta/classes/archiver.bbclass
index 41a552c..2f3b278 100644
--- a/yocto-poky/meta/classes/archiver.bbclass
+++ b/yocto-poky/meta/classes/archiver.bbclass
@@ -53,6 +53,12 @@
 
 python () {
     pn = d.getVar('PN', True)
+    assume_provided = (d.getVar("ASSUME_PROVIDED", True) or "").split()
+    if pn in assume_provided:
+        for p in d.getVar("PROVIDES", True).split():
+            if p != pn:
+                pn = p
+                break
 
     included, reason = copyleft_should_include(d)
     if not included:
@@ -61,6 +67,12 @@
     else:
         bb.debug(1, 'archiver: %s is included: %s' % (pn, reason))
 
+    # We just archive gcc-source for all the gcc related recipes
+    if d.getVar('BPN', True) in ['gcc', 'libgcc'] \
+            and not pn.startswith('gcc-source'):
+        bb.debug(1, 'archiver: %s is excluded, covered by gcc-source' % pn)
+        return
+
     ar_src = d.getVarFlag('ARCHIVER_MODE', 'src', True)
     ar_dumpdata = d.getVarFlag('ARCHIVER_MODE', 'dumpdata', True)
     ar_recipe = d.getVarFlag('ARCHIVER_MODE', 'recipe', True)
@@ -73,8 +85,15 @@
         # We can't use "addtask do_ar_configured after do_configure" since it
         # will cause the deptask of do_populate_sysroot to run not matter what
         # archives we need, so we add the depends here.
-        d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
+
+        # There is a corner case with "gcc-source-${PV}" recipes, they don't have
+        # the "do_configure" task, so we need to use "do_preconfigure"
+        if pn.startswith("gcc-source-"):
+            d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_preconfigure' % pn)
+        else:
+            d.appendVarFlag('do_ar_configured', 'depends', ' %s:do_configure' % pn)
         d.appendVarFlag('do_deploy_archives', 'depends', ' %s:do_ar_configured' % pn)
+
     elif ar_src:
         bb.fatal("Invalid ARCHIVER_MODE[src]: %s" % ar_src)
 
@@ -119,21 +138,9 @@
         if os.path.isfile(local):
             shutil.copy(local, ar_outdir)
         elif os.path.isdir(local):
-            basename = os.path.basename(local)
-
             tmpdir = tempfile.mkdtemp(dir=d.getVar('ARCHIVER_WORKDIR', True))
             fetch.unpack(tmpdir, (url,))
-
-            os.chdir(tmpdir)
-            # We eliminate any AUTOINC+ in the revision.
-            try:
-                src_rev = bb.fetch2.get_srcrev(d).replace('AUTOINC+','')
-            except:
-                src_rev = 'NOREV'
-            tarname = os.path.join(ar_outdir, basename + '.' + src_rev + '.tar.gz')
-            tar = tarfile.open(tarname, 'w:gz')
-            tar.add('.')
-            tar.close()
+            create_tarball(d, tmpdir + '/.', '', ar_outdir)
 
     # Emit patch series files for 'original'
     bb.note('Writing patch series files...')
@@ -156,8 +163,9 @@
 
     # Get the ARCHIVER_OUTDIR before we reset the WORKDIR
     ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
+    ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
     bb.note('Archiving the patched source...')
-    d.setVar('WORKDIR', ar_outdir)
+    d.setVar('WORKDIR', ar_workdir)
     create_tarball(d, d.getVar('S', True), 'patched', ar_outdir)
 }
 
@@ -167,11 +175,18 @@
     ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
     if d.getVarFlag('ARCHIVER_MODE', 'src', True) == 'configured':
         bb.note('Archiving the configured source...')
+        pn = d.getVar('PN', True)
+        # "gcc-source-${PV}" recipes don't have "do_configure"
+        # task, so we need to run "do_preconfigure" instead
+        if pn.startswith("gcc-source-"):
+            d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
+            bb.build.exec_func('do_preconfigure', d)
+
         # The libtool-native's do_configure will remove the
         # ${STAGING_DATADIR}/aclocal/libtool.m4, so we can't re-run the
         # do_configure, we archive the already configured ${S} to
         # instead of.
-        if d.getVar('PN', True) != 'libtool-native':
+        elif pn != 'libtool-native':
             # Change the WORKDIR to make do_configure run in another dir.
             d.setVar('WORKDIR', d.getVar('ARCHIVER_WORKDIR', True))
             if bb.data.inherits_class('kernel-yocto', d):
@@ -203,12 +218,15 @@
     import tarfile
 
     # Make sure we are only creating a single tarball for gcc sources
-    if d.getVar('SRC_URI', True) == "" and 'gcc' in d.getVar('PN', True):
+    if (d.getVar('SRC_URI', True) == ""):
         return
 
     bb.utils.mkdirhier(ar_outdir)
-    tarname = os.path.join(ar_outdir, '%s-%s.tar.gz' % \
-            (d.getVar('PF', True), suffix))
+    if suffix:
+        filename = '%s-%s.tar.gz' % (d.getVar('PF', True), suffix)
+    else:
+        filename = '%s.tar.gz' % d.getVar('PF', True)
+    tarname = os.path.join(ar_outdir, filename)
 
     srcdir = srcdir.rstrip('/')
     dirname = os.path.dirname(srcdir)
@@ -250,21 +268,19 @@
             [ 'patched', 'configured'] and \
             d.getVarFlag('ARCHIVER_MODE', 'diff', True) != '1':
         return
-    # Change the WORKDIR to make do_unpack do_patch run in another dir.
     ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
-    d.setVar('WORKDIR', ar_outdir)
+    ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
 
-    # The changed 'WORKDIR' also casued 'B' changed, create dir 'B' for the
-    # possibly requiring of the following tasks (such as some recipes's
-    # do_patch required 'B' existed).
-    bb.utils.mkdirhier(d.getVar('B', True))
+    # The kernel class functions require it to be on work-shared, so we dont change WORKDIR
+    if not bb.data.inherits_class('kernel-yocto', d):
+        # Change the WORKDIR to make do_unpack do_patch run in another dir.
+        d.setVar('WORKDIR', ar_workdir)
 
-    # The kernel source is ready after do_validate_branches
-    if bb.data.inherits_class('kernel-yocto', d):
-        bb.build.exec_func('do_unpack', d)
-        bb.build.exec_func('do_kernel_checkout', d)
-        bb.build.exec_func('do_validate_branches', d)
-    else:
+        # The changed 'WORKDIR' also caused 'B' changed, create dir 'B' for the
+        # possibly requiring of the following tasks (such as some recipes's
+        # do_patch required 'B' existed).
+        bb.utils.mkdirhier(d.getVar('B', True))
+
         bb.build.exec_func('do_unpack', d)
 
     # Save the original source for creating the patches
@@ -273,8 +289,8 @@
         src_orig = '%s.orig' % src
         oe.path.copytree(src, src_orig)
 
-    # Make sure gcc sources are patched only once
-    if not ((d.getVar('SRC_URI', True) == "" and 'gcc' in d.getVar('PN', True))):
+    # Make sure gcc and kernel sources are patched only once
+    if not ((d.getVar('SRC_URI', True) == "" or bb.data.inherits_class('kernel-yocto', d))):
         bb.build.exec_func('do_patch', d)
 
     # Create the patches
@@ -299,6 +315,16 @@
     bb.utils.mkdirhier(outdir)
     shutil.copy(bbfile, outdir)
 
+    pn = d.getVar('PN', True)
+    bbappend_files = d.getVar('BBINCLUDED', True).split()
+    # If recipe name is aa, we need to match files like aa.bbappend and aa_1.1.bbappend
+    # Files like aa1.bbappend or aa1_1.1.bbappend must be excluded.
+    bbappend_re = re.compile( r".*/%s_[^/]*\.bbappend$" %pn)
+    bbappend_re1 = re.compile( r".*/%s\.bbappend$" %pn)
+    for file in bbappend_files:
+        if bbappend_re.match(file) or bbappend_re1.match(file):
+            shutil.copy(file, outdir)
+
     dirname = os.path.dirname(bbfile)
     bbpath = '%s:%s' % (dirname, d.getVar('BBPATH', True))
     f = open(bbfile, 'r')
@@ -326,27 +352,29 @@
     dumpfile = os.path.join(d.getVar('ARCHIVER_OUTDIR', True), \
         '%s-showdata.dump' % d.getVar('PF', True))
     bb.note('Dumping metadata into %s' % dumpfile)
-    f = open(dumpfile, 'w')
-    # emit variables and shell functions
-    bb.data.emit_env(f, d, True)
-    # emit the metadata which isn't valid shell
-    for e in d.keys():
-        if bb.data.getVarFlag(e, 'python', d):
-            f.write("\npython %s () {\n%s}\n" % (e, bb.data.getVar(e, d, True)))
-    f.close()
+    with open(dumpfile, "w") as f:
+        # emit variables and shell functions
+        bb.data.emit_env(f, d, True)
+        # emit the metadata which isn't valid shell
+        for e in d.keys():
+            if d.getVarFlag(e, "python", False):
+                f.write("\npython %s () {\n%s}\n" % (e, d.getVar(e, False)))
 }
 
 SSTATETASKS += "do_deploy_archives"
 do_deploy_archives () {
-    echo "Deploying source archive files ..."
+    echo "Deploying source archive files from ${ARCHIVER_TOPDIR} to ${DEPLOY_DIR_SRC}."
 }
 python do_deploy_archives_setscene () {
     sstate_setscene(d)
 }
+do_deploy_archives[dirs] = "${ARCHIVER_TOPDIR}"
 do_deploy_archives[sstate-inputdirs] = "${ARCHIVER_TOPDIR}"
 do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}"
+addtask do_deploy_archives_setscene
 
 addtask do_ar_original after do_unpack
+addtask do_unpack_and_patch after do_patch
 addtask do_ar_patched after do_unpack_and_patch
 addtask do_ar_configured after do_unpack_and_patch
 addtask do_dumpdata
@@ -364,6 +392,4 @@
     # Add tasks in the correct order, specifically for linux-yocto to avoid race condition
     if bb.data.inherits_class('kernel-yocto', d):
         bb.build.addtask('do_kernel_configme', 'do_configure', 'do_unpack_and_patch', d)
-    else:
-        bb.build.addtask('do_unpack_and_patch', None, 'do_patch', d)
 }
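
The do_ar_recipe hunk above copies any bbappend that belongs to the recipe being archived: for a recipe named aa, files like aa.bbappend and aa_1.1.bbappend should match, while aa1.bbappend or aa1_1.1.bbappend must not. A minimal standalone sketch of that filter, reusing the two regular expressions from the hunk; the file paths below are made up purely for illustration:

    import re

    def matching_bbappends(pn, bbincluded_files):
        # Mirrors the filter added to do_ar_recipe: match "<pn>.bbappend"
        # and "<pn>_<version>.bbappend", nothing else.
        versioned = re.compile(r".*/%s_[^/]*\.bbappend$" % pn)
        exact = re.compile(r".*/%s\.bbappend$" % pn)
        return [f for f in bbincluded_files
                if versioned.match(f) or exact.match(f)]

    # Hypothetical BBINCLUDED-style list, for illustration only.
    files = [
        "/layers/meta-foo/recipes/aa/aa.bbappend",
        "/layers/meta-foo/recipes/aa/aa_1.1.bbappend",
        "/layers/meta-foo/recipes/aa1/aa1.bbappend",
    ]
    print(matching_bbappends("aa", files))

Running this prints only the first two paths, which is exactly the set the hunk copies into the archiver output directory.
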
diff --git a/yocto-poky/meta/classes/autotools.bbclass b/yocto-poky/meta/classes/autotools.bbclass
index d546a5c..6649f5d 100644
--- a/yocto-poky/meta/classes/autotools.bbclass
+++ b/yocto-poky/meta/classes/autotools.bbclass
@@ -83,17 +83,16 @@
 AUTOTOOLS_AUXDIR ?= "${AUTOTOOLS_SCRIPT_PATH}"
 
 oe_runconf () {
-	cfgscript="${CONFIGURE_SCRIPT}"
+	# Use relative path to avoid buildpaths in files
+	cfgscript_name="`basename ${CONFIGURE_SCRIPT}`"
+	cfgscript=`python -c "import os; print os.path.relpath(os.path.dirname('${CONFIGURE_SCRIPT}'), '.')"`/$cfgscript_name
 	if [ -x "$cfgscript" ] ; then
 		bbnote "Running $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} $@"
-		set +e
-		${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"
-		if [ "$?" != "0" ]; then
-			echo "Configure failed. The contents of all config.log files follows to aid debugging"
-			find ${B} -ignore_readdir_race -name config.log -print -exec cat {} \;
-			die "oe_runconf failed"
+		if ! ${CACHED_CONFIGUREVARS} $cfgscript ${CONFIGUREOPTS} ${EXTRA_OECONF} "$@"; then
+			bbnote "The following config.log files may provide further information."
+			bbnote `find ${B} -ignore_readdir_race -type f -name config.log`
+			bbfatal_log "configure failed"
 		fi
-		set -e
 	else
 		bbfatal "no configure script found at $cfgscript"
 	fi
@@ -113,8 +112,7 @@
 				# regenerate them even if CFLAGS/LDFLAGS are different
 				cd ${S}
 				if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
-					echo "Running \"${MAKE} clean\" in ${S}"
-					${MAKE} clean
+					oe_runmake clean
 				fi
 				find ${S} -ignore_readdir_race -name \*.la -delete
 			fi
@@ -124,6 +122,7 @@
 
 autotools_postconfigure(){
 	if [ -n "${CONFIGURESTAMPFILE}" ]; then
+		mkdir -p `dirname ${CONFIGURESTAMPFILE}`
 		echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
 	fi
 }
@@ -149,20 +148,26 @@
     bb.utils.mkdirhier(aclocaldir)
     start = None
     configuredeps = []
+    # Detect bitbake -b usage
+    # Everything but quilt-native would have dependencies
+    nodeps = (pn != "quilt-native")
 
     for dep in taskdepdata:
         data = taskdepdata[dep]
         if data[1] == "do_configure" and data[0] == pn:
             start = dep
+        if not nodeps and start:
             break
+        if nodeps and data[0] != pn:
+            nodeps = False
     if start is None:
         bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
 
     # We need to find configure tasks which are either from <target> -> <target>
     # or <native> -> <native> but not <target> -> <native> unless they're direct
     # dependencies. This mirrors what would get restored from sstate.
-    done = [dep]
-    next = [dep]
+    done = [start]
+    next = [start]
     while next:
         new = []
         for dep in next:
@@ -189,7 +194,11 @@
     #bb.warn(str(configuredeps2))
 
     cp = []
-    siteconf = []    
+    if nodeps:
+        bb.warn("autotools: Unable to find task dependencies, -b being used? Pulling in all m4 files")
+        for l in [d.expand("${STAGING_DATADIR_NATIVE}/aclocal/"), d.expand("${STAGING_DATADIR}/aclocal/")]:
+            cp.extend(os.path.join(l, f) for f in os.listdir(l))
+
     for c in configuredeps:
         if c.endswith("-native"):
             manifest = d.expand("${SSTATE_MANIFESTS}/manifest-${BUILD_ARCH}-%s.populate_sysroot" % c)
@@ -227,9 +236,9 @@
 	# for a package whose autotools are old, on an x86_64 machine, which the old
 	# config.sub does not support.  Work around this by installing them manually
 	# regardless.
-	( for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
+	for ac in `find ${S} -ignore_readdir_race -name configure.in -o -name configure.ac`; do
 		rm -f `dirname $ac`/configure
-		done )
+	done
 	if [ -e ${AUTOTOOLS_SCRIPT_PATH}/configure.in -o -e ${AUTOTOOLS_SCRIPT_PATH}/configure.ac ]; then
 		olddir=`pwd`
 		cd ${AUTOTOOLS_SCRIPT_PATH}
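
The oe_runconf change above invokes the configure script through a path relative to the build directory, so absolute build paths are less likely to be recorded in configure-generated files. A small sketch of what the inline python -c expression computes; the two paths are hypothetical stand-ins for ${CONFIGURE_SCRIPT} and ${B}:

    import os

    # Stand-ins for ${CONFIGURE_SCRIPT} and ${B}; in the class itself the
    # second argument is simply '.', because oe_runconf already runs from ${B}.
    configure_script = "/build/tmp/work/foo/1.0-r0/foo-1.0/configure"
    build_dir = "/build/tmp/work/foo/1.0-r0/build"

    cfgscript = os.path.join(
        os.path.relpath(os.path.dirname(configure_script), build_dir),
        os.path.basename(configure_script))

    print(cfgscript)   # -> ../foo-1.0/configure
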
diff --git a/yocto-poky/meta/classes/autotools_stage.bbclass b/yocto-poky/meta/classes/autotools_stage.bbclass
deleted file mode 100644
index b3c41e4..0000000
--- a/yocto-poky/meta/classes/autotools_stage.bbclass
+++ /dev/null
@@ -1,2 +0,0 @@
-inherit autotools
-
diff --git a/yocto-poky/meta/classes/base.bbclass b/yocto-poky/meta/classes/base.bbclass
index 9bd5499..a7ca3a6 100644
--- a/yocto-poky/meta/classes/base.bbclass
+++ b/yocto-poky/meta/classes/base.bbclass
@@ -204,7 +204,7 @@
         bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))
 
 addhandler base_eventhandler
-base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete"
+base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.runqueue.sceneQueueComplete bb.event.RecipeParsed"
 python base_eventhandler() {
     import bb.runqueue
 
@@ -230,7 +230,8 @@
                     statuslines.extend(flines)
 
         statusheader = e.data.getVar('BUILDCFG_HEADER', True)
-        bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
+        if statusheader:
+            bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))
 
     # This code is to silence warnings where the SDK variables overwrite the 
     # target ones and we'd see dulpicate key names overwriting each other
@@ -254,6 +255,24 @@
             bb.debug(1, "Executing SceneQueue Completion commands: %s" % "\n".join(cmds))
             bb.build.exec_func("completion_function", e.data)
             os.remove(completions)
+
+    if isinstance(e, bb.event.RecipeParsed):
+        #
+        # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set
+        # skip parsing for all the other providers which will mean they get uninstalled from the
+        # sysroot since they're now "unreachable". This makes switching virtual/kernel work in 
+        # particular.
+        #
+        pn = d.getVar('PN', True)
+        source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
+        if not source_mirror_fetch:
+            provs = (d.getVar("PROVIDES", True) or "").split()
+            multiwhitelist = (d.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
+            for p in provs:
+                if p.startswith("virtual/") and p not in multiwhitelist:
+                    profprov = d.getVar("PREFERRED_PROVIDER_" + p, True)
+                    if profprov and pn != profprov:
+                        raise bb.parse.SkipPackage("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
 }
 
 CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
@@ -273,6 +292,7 @@
 		fi
 	fi
 	if [ -n "${CONFIGURESTAMPFILE}" ]; then
+		mkdir -p `dirname ${CONFIGURESTAMPFILE}`
 		echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
 	fi
 }
@@ -363,7 +383,10 @@
                     newappends.append(a)
                 elif a.startswith("virtual/"):
                     subs = a.split("/", 1)[1]
-                    newappends.append("virtual/" + prefix + subs + extension)
+                    if subs.startswith(prefix):
+                        newappends.append(a + extension)
+                    else:
+                        newappends.append("virtual/" + prefix + subs + extension)
                 else:
                     if a.startswith(prefix):
                         newappends.append(a + extension)
@@ -375,11 +398,11 @@
             if not appends:
                 return
             if varname.find("DEPENDS") != -1:
-                if pn.startswith("nativesdk-"):
+                if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d) :
                     appends = expandFilter(appends, "", "nativesdk-")
-                if pn.endswith("-native"):
+                elif bb.data.inherits_class('native', d):
                     appends = expandFilter(appends, "-native", "")
-                if mlprefix:
+                elif mlprefix:
                     appends = expandFilter(appends, "", mlprefix)
             varname = d.expand(varname)
             d.appendVar(varname, " " + " ".join(appends))
@@ -405,9 +428,12 @@
                     extraconf.append(items[1])
         appendVar('DEPENDS', extradeps)
         appendVar('RDEPENDS_${PN}', extrardeps)
-        if bb.data.inherits_class('cmake', d):
-            appendVar('EXTRA_OECMAKE', extraconf)
-        else:
+        appendVar('PACKAGECONFIG_CONFARGS', extraconf)
+
+        # TODO: once all recipes/classes abusing EXTRA_OECONF
+        # to get PACKAGECONFIG options are fixed to use PACKAGECONFIG_CONFARGS
+        # move this appendVar to autotools.bbclass.
+        if not bb.data.inherits_class('cmake', d):
             appendVar('EXTRA_OECONF', extraconf)
 
     pn = d.getVar('PN', True)
@@ -431,15 +457,26 @@
         d.setVarFlag('do_configure', 'umask', '022')
         d.setVarFlag('do_compile', 'umask', '022')
         d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
-        d.setVarFlag('do_install', 'fakeroot', 1)
+        d.setVarFlag('do_install', 'fakeroot', '1')
         d.setVarFlag('do_install', 'umask', '022')
         d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
-        d.setVarFlag('do_package', 'fakeroot', 1)
+        d.setVarFlag('do_package', 'fakeroot', '1')
         d.setVarFlag('do_package', 'umask', '022')
-        d.setVarFlag('do_package_setscene', 'fakeroot', 1)
+        d.setVarFlag('do_package_setscene', 'fakeroot', '1')
         d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
-        d.setVarFlag('do_devshell', 'fakeroot', 1)
+        d.setVarFlag('do_devshell', 'fakeroot', '1')
         d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
+
+    need_machine = d.getVar('COMPATIBLE_MACHINE', True)
+    if need_machine:
+        import re
+        compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
+        for m in compat_machines:
+            if re.match(need_machine, m):
+                break
+        else:
+            raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
+
     source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', 0)
     if not source_mirror_fetch:
         need_host = d.getVar('COMPATIBLE_HOST', True)
@@ -449,17 +486,6 @@
             if not re.match(need_host, this_host):
                 raise bb.parse.SkipPackage("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)
 
-        need_machine = d.getVar('COMPATIBLE_MACHINE', True)
-        if need_machine:
-            import re
-            compat_machines = (d.getVar('MACHINEOVERRIDES', True) or "").split(":")
-            for m in compat_machines:
-                if re.match(need_machine, m):
-                    break
-            else:
-                raise bb.parse.SkipPackage("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE', True))
-
-
         bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE', True) or "").split()
 
         check_license = False if pn.startswith("nativesdk-") else True
@@ -476,31 +502,23 @@
 
             whitelist = []
             incompatwl = []
-            htincompatwl = []
             for lic in bad_licenses:
                 spdx_license = return_spdx(d, lic)
-                for w in ["HOSTTOOLS_WHITELIST_", "LGPLv2_WHITELIST_", "WHITELIST_"]:
+                for w in ["LGPLv2_WHITELIST_", "WHITELIST_"]:
                     whitelist.extend((d.getVar(w + lic, True) or "").split())
                     if spdx_license:
                         whitelist.extend((d.getVar(w + spdx_license, True) or "").split())
                     '''
-                    We need to track what we are whitelisting and why. If pn is 
-                    incompatible and is not HOSTTOOLS_WHITELIST_ we need to be 
-                    able to note that the image that is created may infact 
-                    contain incompatible licenses despite INCOMPATIBLE_LICENSE 
-                    being set.
+                    We need to track what we are whitelisting and why. If pn is
+                    incompatible we need to be able to note that the image that
+                    is created may infact contain incompatible licenses despite
+                    INCOMPATIBLE_LICENSE being set.
                     '''
-                    if "HOSTTOOLS" in w:
-                        htincompatwl.extend((d.getVar(w + lic, True) or "").split())
-                        if spdx_license:
-                            htincompatwl.extend((d.getVar(w + spdx_license, True) or "").split())
-                    else:
-                        incompatwl.extend((d.getVar(w + lic, True) or "").split())
-                        if spdx_license:
-                            incompatwl.extend((d.getVar(w + spdx_license, True) or "").split())
+                    incompatwl.extend((d.getVar(w + lic, True) or "").split())
+                    if spdx_license:
+                        incompatwl.extend((d.getVar(w + spdx_license, True) or "").split())
 
             if not pn in whitelist:
-                recipe_license = d.getVar('LICENSE', True)
                 pkgs = d.getVar('PACKAGES', True).split()
                 skipped_pkgs = []
                 unskipped_pkgs = []
@@ -512,54 +530,71 @@
                 all_skipped = skipped_pkgs and not unskipped_pkgs
                 if unskipped_pkgs:
                     for pkg in skipped_pkgs:
-                        bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + recipe_license)
+                        bb.debug(1, "SKIPPING the package " + pkg + " at do_rootfs because it's " + license)
                         mlprefix = d.getVar('MLPREFIX', True)
                         d.setVar('LICENSE_EXCLUSION-' + mlprefix + pkg, 1)
                     for pkg in unskipped_pkgs:
                         bb.debug(1, "INCLUDING the package " + pkg)
                 elif all_skipped or incompatible_license(d, bad_licenses):
-                    bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, recipe_license))
-                    raise bb.parse.SkipPackage("incompatible with license %s" % recipe_license)
+                    bb.debug(1, "SKIPPING recipe %s because it's %s" % (pn, license))
+                    raise bb.parse.SkipPackage("incompatible with license %s" % license)
             elif pn in whitelist:
                 if pn in incompatwl:
                     bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted")
-                elif pn in htincompatwl:
-                    bb.note("INCLUDING " + pn + " as buildable despite INCOMPATIBLE_LICENSE because it has been whitelisted for HOSTTOOLS")
 
+    needsrcrev = False
     srcuri = d.getVar('SRC_URI', True)
-    # Svn packages should DEPEND on subversion-native
-    if "svn://" in srcuri:
-        d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
+    for uri in srcuri.split():
+        (scheme, _ , path) = bb.fetch.decodeurl(uri)[:3]
 
-    # Git packages should DEPEND on git-native
-    if "git://" in srcuri:
-        d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
+        # HTTP/FTP use the wget fetcher
+        if scheme in ("http", "https", "ftp"):
+            d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')
 
-    # Mercurial packages should DEPEND on mercurial-native
-    elif "hg://" in srcuri:
-        d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
+        # Svn packages should DEPEND on subversion-native
+        if scheme == "svn":
+            needsrcrev = True
+            d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')
 
-    # OSC packages should DEPEND on osc-native
-    elif "osc://" in srcuri:
-        d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
+        # Git packages should DEPEND on git-native
+        elif scheme in ("git", "gitsm"):
+            needsrcrev = True
+            d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')
 
-    # *.lz4 should depends on lz4-native for unpacking
-    # Not endswith because of "*.patch.lz4;patch=1". Need bb.fetch.decodeurl in future
-    if '.lz4' in srcuri:
-        d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
+        # Mercurial packages should DEPEND on mercurial-native
+        elif scheme == "hg":
+            needsrcrev = True
+            d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')
 
-    # *.xz should depends on xz-native for unpacking
-    # Not endswith because of "*.patch.xz;patch=1". Need bb.fetch.decodeurl in future
-    if '.xz' in srcuri:
-        d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+        # OSC packages should DEPEND on osc-native
+        elif scheme == "osc":
+            d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')
 
-    # unzip-native should already be staged before unpacking ZIP recipes
-    if ".zip" in srcuri:
-        d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
+        elif scheme == "npm":
+            d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')
 
-    # file is needed by rpm2cpio.sh
-    if ".src.rpm" in srcuri:
-        d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')
+        # *.lz4 should DEPEND on lz4-native for unpacking
+        if path.endswith('.lz4'):
+            d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')
+
+        # *.lz should DEPEND on lzip-native for unpacking
+        elif path.endswith('.lz'):
+            d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')
+
+        # *.xz should DEPEND on xz-native for unpacking
+        elif path.endswith('.xz'):
+            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')
+
+        # .zip should DEPEND on unzip-native for unpacking
+        elif path.endswith('.zip'):
+            d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')
+
+        # file is needed by rpm2cpio.sh
+        elif path.endswith('.src.rpm'):
+            d.appendVarFlag('do_unpack', 'depends', ' file-native:do_populate_sysroot')
+
+    if needsrcrev:
+        d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
 
     set_packagetriplet(d)
 
@@ -612,8 +647,9 @@
 python do_cleansstate() {
         sstate_clean_cachefiles(d)
 }
-
 addtask cleanall after do_cleansstate
+do_cleansstate[nostamp] = "1"
+
 python do_cleanall() {
     src_uri = (d.getVar('SRC_URI', True) or "").split()
     if len(src_uri) == 0:
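
The SRC_URI handling rewritten in the base.bbclass hunk above decodes each URI entry instead of doing substring checks, so a file such as foo.patch.lz4;patch=1 is keyed off its real scheme and path suffix when adding -native fetch/unpack dependencies. A rough standalone sketch of that dispatch; urllib.parse is used here only as a stand-in for bb.fetch.decodeurl, and the example URIs are hypothetical:

    from urllib.parse import urlparse

    # Path suffix -> native unpack tool, mirroring the do_unpack
    # dependencies added in the hunk.
    UNPACK_TOOLS = {
        ".lz4": "lz4-native",
        ".lz": "lzip-native",
        ".xz": "xz-native",
        ".zip": "unzip-native",
        ".src.rpm": "file-native",
    }

    # Scheme -> (fetch tool, needs SRCREV), mirroring the do_fetch dependencies.
    FETCH_TOOLS = {
        "http": ("wget-native", False),
        "https": ("wget-native", False),
        "ftp": ("wget-native", False),
        "svn": ("subversion-native", True),
        "git": ("git-native", True),
        "gitsm": ("git-native", True),
        "hg": ("mercurial-native", True),
        "osc": ("osc-native", False),
        "npm": ("nodejs-native", False),
    }

    def implicit_deps(src_uri):
        fetch, unpack, needsrcrev = set(), set(), False
        for uri in src_uri.split():
            parsed = urlparse(uri.split(";", 1)[0])  # drop ;name=value parameters
            tool = FETCH_TOOLS.get(parsed.scheme)
            if tool:
                fetch.add(tool[0])
                needsrcrev = needsrcrev or tool[1]
            for suffix, unpacker in UNPACK_TOOLS.items():
                if parsed.path.endswith(suffix):
                    unpack.add(unpacker)
                    break
        return fetch, unpack, needsrcrev

    print(implicit_deps(
        "https://example.org/foo-1.0.tar.xz git://host/repo.git;branch=master"))
    # -> ({'wget-native', 'git-native'}, {'xz-native'}, True)
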
diff --git a/yocto-poky/meta/classes/bash-completion.bbclass b/yocto-poky/meta/classes/bash-completion.bbclass
new file mode 100644
index 0000000..74a878e
--- /dev/null
+++ b/yocto-poky/meta/classes/bash-completion.bbclass
@@ -0,0 +1,5 @@
+PACKAGES += "${PN}-bash-completion"
+
+FILES_${PN}-bash-completion = "${datadir}/bash-completion ${sysconfdir}/bash_completion.d"
+
+RDEPENDS_${PN}-bash-completion = "bash-completion"
diff --git a/yocto-poky/meta/classes/boot-directdisk.bbclass b/yocto-poky/meta/classes/boot-directdisk.bbclass
deleted file mode 100644
index 600e21a..0000000
--- a/yocto-poky/meta/classes/boot-directdisk.bbclass
+++ /dev/null
@@ -1,194 +0,0 @@
-# boot-directdisk.bbclass
-# (loosly based off bootimg.bbclass Copyright (C) 2004, Advanced Micro Devices, Inc.)
-#
-# Create an image which can be placed directly onto a harddisk using dd and then
-# booted.
-#
-# This uses syslinux. extlinux would have been nice but required the ext2/3 
-# partition to be mounted. grub requires to run itself as part of the install 
-# process.
-#
-# The end result is a 512 boot sector populated with an MBR and partition table
-# followed by an msdos fat16 partition containing syslinux and a linux kernel
-# completed by the ext2/3 rootfs.
-#
-# We have to push the msdos parition table size > 16MB so fat 16 is used as parted
-# won't touch fat12 partitions.
-
-# External variables needed
-
-# ${ROOTFS} - the rootfs image to incorporate
-
-do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
-                               virtual/kernel:do_deploy \
-                               syslinux:do_populate_sysroot \
-                               syslinux-native:do_populate_sysroot \
-                               parted-native:do_populate_sysroot \
-                               mtools-native:do_populate_sysroot "
-
-PACKAGES = " "
-EXCLUDE_FROM_WORLD = "1"
-
-BOOTDD_VOLUME_ID   ?= "boot"
-BOOTDD_EXTRA_SPACE ?= "16384"
-
-EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
-EFI_PROVIDER ?= "grub-efi"
-EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
-
-# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
-# contain "efi". This way legacy is supported by default if neither is
-# specified, maintaining the original behavior.
-def pcbios(d):
-    pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
-    if pcbios == "0":
-        pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
-    return pcbios
-
-def pcbios_class(d):
-    if d.getVar("PCBIOS", True) == "1":
-        return "syslinux"
-    return ""
-
-PCBIOS = "${@pcbios(d)}"
-PCBIOS_CLASS = "${@pcbios_class(d)}"
-
-inherit ${PCBIOS_CLASS}
-inherit ${EFI_CLASS}
-
-# Get the build_syslinux_cfg() function from the syslinux class
-
-AUTO_SYSLINUXCFG = "1"
-DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
-SYSLINUX_ROOT ?= "root=/dev/sda2"
-SYSLINUX_TIMEOUT ?= "10"
-
-IS_VM = '${@bb.utils.contains_any("IMAGE_FSTYPES", ["vmdk", "vdi", "qcow2"], "true", "false", d)}'
-
-boot_direct_populate() {
-	dest=$1
-	install -d $dest
-
-	# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
-	if [ -e ${DEPLOY_DIR_IMAGE}/bzImage ]; then
-		install -m 0644 ${DEPLOY_DIR_IMAGE}/bzImage $dest/vmlinuz
-	fi
-
-	# initrd is made of concatenation of multiple filesystem images
-	if [ -n "${INITRD}" ]; then
-		rm -f $dest/initrd
-		for fs in ${INITRD}
-		do
-			if [ -s "${fs}" ]; then
-				cat ${fs} >> $dest/initrd
-			else
-				bbfatal "${fs} is invalid. initrd image creation failed."
-			fi
-		done
-		chmod 0644 $dest/initrd
-	fi
-}
-
-build_boot_dd() {
-	HDDDIR="${S}/hdd/boot"
-	HDDIMG="${S}/hdd.image"
-	IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
-
-	boot_direct_populate $HDDDIR
-
-	if [ "${PCBIOS}" = "1" ]; then
-		syslinux_hddimg_populate $HDDDIR
-	fi
-	if [ "${EFI}" = "1" ]; then
-		efi_hddimg_populate $HDDDIR
-	fi
-
-	if [ "${IS_VM}" = "true" ]; then
-		if [ "x${AUTO_SYSLINUXMENU}" = "x1" ] ; then
-			install -m 0644 ${STAGING_DIR}/${MACHINE}/usr/share/syslinux/vesamenu.c32 $HDDDIR/${SYSLINUXDIR}/
-			if [ "x${SYSLINUX_SPLASH}" != "x" ] ; then
-				install -m 0644 ${SYSLINUX_SPLASH} $HDDDIR/${SYSLINUXDIR}/splash.lss
-			fi
-		fi
-	fi
-
-	BLOCKS=`du -bks $HDDDIR | cut -f 1`
-	BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
-
-	# Ensure total sectors is an integral number of sectors per
-	# track or mcopy will complain. Sectors are 512 bytes, and we
-	# generate images with 32 sectors per track. This calculation is
-	# done in blocks, thus the mod by 16 instead of 32.
-	BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
-
-	mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS 
-	mcopy -i $HDDIMG -s $HDDDIR/* ::/
-
-	if [ "${PCBIOS}" = "1" ]; then
-		syslinux_hdddirect_install $HDDIMG
-	fi	
-	chmod 644 $HDDIMG
-
-	ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
-	TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
-	END1=`expr $BLOCKS \* 1024`
-	END2=`expr $END1 + 512`
-	END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
-
-	echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
-	rm -rf $IMAGE
-	dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
-
-	parted $IMAGE mklabel msdos
-	parted $IMAGE mkpart primary fat16 0 ${END1}B
-	parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
-	parted $IMAGE set 1 boot on 
-
-	parted $IMAGE print
-
-	awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
-		dd of=$IMAGE bs=1 seek=440 conv=notrunc
-
-	OFFSET=`expr $END2 / 512`
-	if [ "${PCBIOS}" = "1" ]; then
-		dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
-	fi
-
-	dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
-	dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
-
-	cd ${DEPLOY_DIR_IMAGE}
-	rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
-	ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
-} 
-
-python do_bootdirectdisk() {
-    validate_disk_signature(d)
-    if d.getVar("PCBIOS", True) == "1":
-        bb.build.exec_func('build_syslinux_cfg', d)
-    if d.getVar("EFI", True) == "1":
-        bb.build.exec_func('build_efi_cfg', d)
-    bb.build.exec_func('build_boot_dd', d)
-}
-
-def generate_disk_signature():
-    import uuid
-
-    signature = str(uuid.uuid4())[:8]
-
-    if signature != '00000000':
-        return signature
-    else:
-        return 'ffffffff'
-
-def validate_disk_signature(d):
-    import re
-
-    disk_signature = d.getVar("DISK_SIGNATURE", True)
-
-    if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
-        bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
-
-DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
-
-addtask bootdirectdisk before do_build
diff --git a/yocto-poky/meta/classes/bootimg.bbclass b/yocto-poky/meta/classes/bootimg.bbclass
deleted file mode 100644
index ec9d0b7..0000000
--- a/yocto-poky/meta/classes/bootimg.bbclass
+++ /dev/null
@@ -1,303 +0,0 @@
-# Copyright (C) 2004, Advanced Micro Devices, Inc.  All Rights Reserved
-# Released under the MIT license (see packages/COPYING)
-
-# Creates a bootable image using syslinux, your kernel and an optional
-# initrd
-
-#
-# End result is two things:
-#
-# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
-# an initrd and a rootfs image. These can be written to harddisks directly and
-# also booted on USB flash disks (write them there with dd).
-#
-# 2. A CD .iso image
-
-# Boot process is that the initrd will boot and process which label was selected
-# in syslinux. Actions based on the label are then performed (e.g. installing to
-# an hdd)
-
-# External variables (also used by syslinux.bbclass)
-# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
-# ${COMPRESSISO} - Transparent compress ISO, reduce size ~40% if set to 1
-# ${NOISO}  - skip building the ISO image if set to 1
-# ${NOHDD}  - skip building the HDD image if set to 1
-# ${HDDIMG_ID} - FAT image volume-id
-# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
-
-do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
-                        mtools-native:do_populate_sysroot \
-                        cdrtools-native:do_populate_sysroot \
-                        virtual/kernel:do_deploy \
-                        ${@oe.utils.ifelse(d.getVar('COMPRESSISO', False),'zisofs-tools-native:do_populate_sysroot','')}"
-
-PACKAGES = " "
-EXCLUDE_FROM_WORLD = "1"
-
-HDDDIR = "${S}/hddimg"
-ISODIR = "${S}/iso"
-EFIIMGDIR = "${S}/efi_img"
-COMPACT_ISODIR = "${S}/iso.z"
-COMPRESSISO ?= "0"
-
-BOOTIMG_VOLUME_ID   ?= "boot"
-BOOTIMG_EXTRA_SPACE ?= "512"
-
-EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
-EFI_PROVIDER ?= "grub-efi"
-EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
-
-KERNEL_IMAGETYPE ??= "bzImage"
-
-# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
-# contain "efi". This way legacy is supported by default if neither is
-# specified, maintaining the original behavior.
-def pcbios(d):
-    pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
-    if pcbios == "0":
-        pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
-    return pcbios
-
-PCBIOS = "${@pcbios(d)}"
-
-# The syslinux is required for the isohybrid command and boot catalog
-inherit syslinux
-inherit ${EFI_CLASS}
-
-populate() {
-	DEST=$1
-	install -d ${DEST}
-
-	# Install kernel, initrd, and rootfs.img in DEST for all loaders to use.
-	install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ${DEST}/vmlinuz
-	
-	# initrd is made of concatenation of multiple filesystem images
-	if [ -n "${INITRD}" ]; then
-		rm -f ${DEST}/initrd
-		for fs in ${INITRD}
-		do
-			if [ -s "${fs}" ]; then
-				cat ${fs} >> ${DEST}/initrd
-			else
-				bbfatal "${fs} is invalid. initrd image creation failed."
-			fi
-		done
-		chmod 0644 ${DEST}/initrd
-	fi
-
-	if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
-		install -m 0644 ${ROOTFS} ${DEST}/rootfs.img
-	fi
-
-}
-
-build_iso() {
-	# Only create an ISO if we have an INITRD and NOISO was not set
-	if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
-		bbnote "ISO image will not be created."
-		return
-	fi
-	# ${INITRD} is a list of multiple filesystem images
-	for fs in ${INITRD}
-	do
-		if [ ! -s "${fs}" ]; then
-			bbnote "ISO image will not be created. ${fs} is invalid."
-			return
-		fi
-	done
-
-
-	populate ${ISODIR}
-
-	if [ "${PCBIOS}" = "1" ]; then
-		syslinux_iso_populate ${ISODIR}
-	fi
-	if [ "${EFI}" = "1" ]; then
-		efi_iso_populate ${ISODIR}
-		build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
-	fi
-
-	# EFI only
-	if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
-		# Work around bug in isohybrid where it requires isolinux.bin
-		# In the boot catalog, even though it is not used
-		mkdir -p ${ISODIR}/${ISOLINUXDIR}
-		install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
-	fi
-
-	if [ "${COMPRESSISO}" = "1" ] ; then
-		# create compact directory, compress iso
-		mkdir -p ${COMPACT_ISODIR}
-		mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
-
-		# move compact iso to iso, then remove compact directory
-		mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
-		rm -Rf ${COMPACT_ISODIR}
-		mkisofs_compress_opts="-R -z -D -l"
-	else
-		mkisofs_compress_opts="-r"
-	fi
-
-	# Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
-	# when it exceeds 3.8GB, the specification is 4G - 1 bytes, we need
-	# leave a few space for other files.
-	mkisofs_iso_level=""
-
-        if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
-		rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
-		# 4080218931 = 3.8 * 1024 * 1024 * 1024
-		if [ $rootfs_img_size -gt 4080218931 ]; then
-			bbnote "${ISODIR}/rootfs.img execeeds 3.8GB, using '-iso-level 3' for mkisofs"
-			mkisofs_iso_level="-iso-level 3"
-		fi
-	fi
-
-	if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
-		# PCBIOS only media
-		mkisofs -V ${BOOTIMG_VOLUME_ID} \
-		        -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
-			-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
-			$mkisofs_compress_opts \
-			${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
-	else
-		# EFI only OR EFI+PCBIOS
-		mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
-		        -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
-			-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
-			$mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
-			-eltorito-alt-boot -eltorito-platform efi \
-			-b efi.img -no-emul-boot \
-			${ISODIR}
-		isohybrid_args="-u"
-	fi
-
-	isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
-
-	cd ${DEPLOY_DIR_IMAGE}
-	rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
-	ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
-}
-
-build_fat_img() {
-	FATSOURCEDIR=$1
-	FATIMG=$2
-
-	# Calculate the size required for the final image including the
-	# data and filesystem overhead.
-	# Sectors: 512 bytes
-	#  Blocks: 1024 bytes
-
-	# Determine the sector count just for the data
-	SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
-
-	# Account for the filesystem overhead. This includes directory
-	# entries in the clusters as well as the FAT itself.
-	# Assumptions:
-	#   FAT32 (12 or 16 may be selected by mkdosfs, but the extra
-	#   padding will be minimal on those smaller images and not
-	#   worth the logic here to caclulate the smaller FAT sizes)
-	#   < 16 entries per directory
-	#   8.3 filenames only
-
-	# 32 bytes per dir entry
-	DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
-	# 32 bytes for every end-of-directory dir entry
-	DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
-	# 4 bytes per FAT entry per sector of data
-	FAT_BYTES=$(expr $SECTORS \* 4)
-	# 4 bytes per FAT entry per end-of-cluster list
-	FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
-
-	# Use a ceiling function to determine FS overhead in sectors
-	DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
-	# There are two FATs on the image
-	FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
-	SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
-
-	# Determine the final size in blocks accounting for some padding
-	BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
-
-	# Ensure total sectors is an integral number of sectors per
-	# track or mcopy will complain. Sectors are 512 bytes, and we
-	# generate images with 32 sectors per track. This calculation is
-	# done in blocks, thus the mod by 16 instead of 32.
-	BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
-
-	# mkdosfs will sometimes use FAT16 when it is not appropriate,
-	# resulting in a boot failure from SYSLINUX. Use FAT32 for
-	# images larger than 512MB, otherwise let mkdosfs decide.
-	if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
-		FATSIZE="-F 32"
-	fi
-
-	# mkdosfs will fail if ${FATIMG} exists. Since we are creating an
-	# new image, it is safe to delete any previous image.
-	if [ -e ${FATIMG} ]; then
-		rm ${FATIMG}
-	fi
-
-	if [ -z "${HDDIMG_ID}" ]; then
-		mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
-			${BLOCKS}
-	else
-		mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
-		${BLOCKS} -i ${HDDIMG_ID}
-	fi
-
-	# Copy FATSOURCEDIR recursively into the image file directly
-	mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
-}
-
-build_hddimg() {
-	# Create an HDD image
-	if [ "${NOHDD}" != "1" ] ; then
-		populate ${HDDDIR}
-
-		if [ "${PCBIOS}" = "1" ]; then
-			syslinux_hddimg_populate ${HDDDIR}
-		fi
-		if [ "${EFI}" = "1" ]; then
-			efi_hddimg_populate ${HDDDIR}
-		fi
-
-		# Check the size of ${HDDDIR}/rootfs.img, error out if it
-		# exceeds 4GB, it is the single file's max size of FAT fs.
-		if [ -f ${HDDDIR}/rootfs.img ]; then
-			rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
-			max_size=`expr 4 \* 1024 \* 1024 \* 1024`
-			if [ $rootfs_img_size -gt $max_size ]; then
-				bberror "${HDDDIR}/rootfs.img execeeds 4GB,"
-				bberror "this doesn't work on FAT filesystem, you can try either of:"
-				bberror "1) Reduce the size of rootfs.img"
-				bbfatal "2) Use iso, vmdk or vdi to instead of hddimg\n"
-			fi
-		fi
-
-		build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
-
-		if [ "${PCBIOS}" = "1" ]; then
-			syslinux_hddimg_install
-		fi
-
-		chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
-
-		cd ${DEPLOY_DIR_IMAGE}
-		rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
-		ln -s ${IMAGE_NAME}.hddimg ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hddimg
-	fi
-}
-
-python do_bootimg() {
-    if d.getVar("PCBIOS", True) == "1":
-        bb.build.exec_func('build_syslinux_cfg', d)
-    if d.getVar("EFI", True) == "1":
-        bb.build.exec_func('build_efi_cfg', d)
-    bb.build.exec_func('build_hddimg', d)
-    bb.build.exec_func('build_iso', d)
-}
-
-IMAGE_TYPEDEP_iso = "ext4"
-IMAGE_TYPEDEP_hddimg = "ext4"
-IMAGE_TYPES_MASKED += "iso hddimg"
-
-addtask bootimg before do_build
diff --git a/yocto-poky/meta/classes/buildhistory.bbclass b/yocto-poky/meta/classes/buildhistory.bbclass
index 5e2581f..581d532 100644
--- a/yocto-poky/meta/classes/buildhistory.bbclass
+++ b/yocto-poky/meta/classes/buildhistory.bbclass
@@ -3,7 +3,7 @@
 #
 # Based in part on testlab.bbclass and packagehistory.bbclass
 #
-# Copyright (C) 2011-2014 Intel Corporation
+# Copyright (C) 2011-2016 Intel Corporation
 # Copyright (C) 2007-2011 Koen Kooi <koen@openembedded.org>
 #
 
@@ -11,20 +11,54 @@
 BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory"
 BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}"
 BUILDHISTORY_DIR_PACKAGE = "${BUILDHISTORY_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
-BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}/${IMAGE_BASENAME}"
+
+# Setting this to non-empty will remove the old content of the buildhistory as part of
+# the current bitbake invocation and replace it with information about what was built
+# during the build.
+#
+# This is meant to be used in continuous integration (CI) systems when invoking bitbake
+# for full world builds. The effect in that case is that information about packages
+# that no longer get build also gets removed from the buildhistory, which is not
+# the case otherwise.
+#
+# The advantage over manually cleaning the buildhistory outside of bitbake is that
+# the "version-going-backwards" check still works. When relying on that, be careful
+# about failed world builds: they will lead to incomplete information in the
+# buildhistory because information about packages that could not be built will
+# also get removed. A CI system should handle that by discarding the buildhistory
+# of failed builds.
+#
+# The expected usage is via auto.conf, but passing via the command line also works
+# with: BB_ENV_EXTRAWHITE=BUILDHISTORY_RESET BUILDHISTORY_RESET=1
+BUILDHISTORY_RESET ?= ""
+
+BUILDHISTORY_OLD_DIR = "${BUILDHISTORY_DIR}/${@ "old" if "${BUILDHISTORY_RESET}" else ""}"
+BUILDHISTORY_OLD_DIR_PACKAGE = "${BUILDHISTORY_OLD_DIR}/packages/${MULTIMACH_TARGET_SYS}/${PN}"
+BUILDHISTORY_DIR_SDK = "${BUILDHISTORY_DIR}/sdk/${SDK_NAME}${SDK_EXT}/${IMAGE_BASENAME}"
 BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group"
+BUILDHISTORY_SDK_FILES ?= "conf/local.conf conf/bblayers.conf conf/auto.conf conf/locked-sigs.inc conf/devtool.conf"
 BUILDHISTORY_COMMIT ?= "0"
 BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory <buildhistory@${DISTRO}>"
 BUILDHISTORY_PUSH_REPO ?= ""
 
 SSTATEPOSTINSTFUNCS_append = " buildhistory_emit_pkghistory"
-# We want to avoid influence the signatures of sstate tasks - first the function itself:
+# We want to avoid influencing the signatures of sstate tasks - first the function itself:
 sstate_install[vardepsexclude] += "buildhistory_emit_pkghistory"
 # then the value added to SSTATEPOSTINSTFUNCS:
 SSTATEPOSTINSTFUNCS[vardepvalueexclude] .= "| buildhistory_emit_pkghistory"
 
+# All items except those listed here will be removed from a recipe's
+# build history directory by buildhistory_emit_pkghistory(). This is
+# necessary because some of these items (package directories, files that
+# we no longer emit) might be obsolete.
 #
-# Write out metadata about this package for comparision when writing future packages
+# When extending build history, derive your class from buildhistory.bbclass
+# and extend this list here with the additional files created by the derived
+# class.
+BUILDHISTORY_PRESERVE = "latest latest_srcrev"
+
+#
+# Write out metadata about this package for comparison when writing future packages
 #
 python buildhistory_emit_pkghistory() {
     if not d.getVar('BB_CURRENTTASK', True) in ['packagedata', 'packagedata_setscene']:
@@ -38,6 +72,7 @@
     import errno
 
     pkghistdir = d.getVar('BUILDHISTORY_DIR_PACKAGE', True)
+    oldpkghistdir = d.getVar('BUILDHISTORY_OLD_DIR_PACKAGE', True)
 
     class RecipeInfo:
         def __init__(self, name):
@@ -80,7 +115,7 @@
         pkginfo = PackageInfo(pkg)
         with open(histfile, "r") as f:
             for line in f:
-                lns = line.split('=')
+                lns = line.split('=', 1)
                 name = lns[0].strip()
                 value = lns[1].strip(" \t\r\n").strip('"')
                 if name == "PE":
@@ -128,13 +163,13 @@
 
     def getlastpkgversion(pkg):
         try:
-            histfile = os.path.join(pkghistdir, pkg, "latest")
+            histfile = os.path.join(oldpkghistdir, pkg, "latest")
             return readPackageInfo(pkg, histfile)
         except EnvironmentError:
             return None
 
     def sortpkglist(string):
-        pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+ [^ )]+\))?', string, 0)
+        pkgiter = re.finditer(r'[a-zA-Z0-9.+-]+( \([><=]+[^)]+\))?', string, 0)
         pkglist = [p.group(0) for p in pkgiter]
         pkglist.sort()
         return ' '.join(pkglist)
@@ -165,12 +200,13 @@
             raise
 
     packagelist = packages.split()
+    preserve = d.getVar('BUILDHISTORY_PRESERVE', True).split()
     if not os.path.exists(pkghistdir):
         bb.utils.mkdirhier(pkghistdir)
     else:
         # Remove files for packages that no longer exist
         for item in os.listdir(pkghistdir):
-            if item != "latest" and item != "latest_srcrev":
+            if item not in preserve:
                 if item not in packagelist:
                     itempath = os.path.join(pkghistdir, item)
                     if os.path.isdir(itempath):
@@ -325,18 +361,21 @@
 def buildhistory_list_installed(d, rootfs_type="image"):
     from oe.rootfs import image_list_installed_packages
     from oe.sdk import sdk_list_installed_packages
+    from oe.utils import format_pkg_list
 
     process_list = [('file', 'bh_installed_pkgs.txt'),\
                     ('deps', 'bh_installed_pkgs_deps.txt')]
 
+    if rootfs_type == "image":
+        pkgs = image_list_installed_packages(d)
+    else:
+        pkgs = sdk_list_installed_packages(d, rootfs_type == "sdk_target")
+
     for output_type, output_file in process_list:
         output_file_full = os.path.join(d.getVar('WORKDIR', True), output_file)
 
         with open(output_file_full, 'w') as output:
-            if rootfs_type == "image":
-                output.write(image_list_installed_packages(d, output_type))
-            else:
-                output.write(sdk_list_installed_packages(d, rootfs_type == "sdk_target", output_type))
+            output.write(format_pkg_list(pkgs, output_type))
 
 python buildhistory_list_installed_image() {
     buildhistory_list_installed(d)
@@ -377,15 +416,8 @@
 	rm $1/depends.tmp
 
 	# Produce installed package sizes list
-	printf "" > $1/installed-package-sizes.tmp
-	cat $pkgcache | while read pkg pkgfile pkgarch
-	do
-		size=`oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" ${pkg}_${pkgarch}`
-		if [ "$size" != "" ] ; then
-			echo "$size $pkg" >> $1/installed-package-sizes.tmp
-		fi
-	done
-	cat $1/installed-package-sizes.tmp | sort -n -r | awk '{print $1 "\tKiB " $2}' > $1/installed-package-sizes.txt
+	oe-pkgdata-util -p ${PKGDATA_DIR} read-value "PKGSIZE" -n -f $pkgcache > $1/installed-package-sizes.tmp
+	cat $1/installed-package-sizes.tmp | awk '{print $2 "\tKiB " $1}' | sort -n -r > $1/installed-package-sizes.txt
 	rm $1/installed-package-sizes.tmp
 
 	# We're now done with the cache, delete it
@@ -393,7 +425,7 @@
 
 	if [ "$2" != "sdk" ] ; then
 		# Produce some cut-down graphs (for readability)
-		grep -v kernel_image $1/depends.dot | grep -v kernel-2 | grep -v kernel-3 > $1/depends-nokernel.dot
+		grep -v kernel-image $1/depends.dot | grep -v kernel-3 | grep -v kernel-4 > $1/depends-nokernel.dot
 		grep -v libc6 $1/depends-nokernel.dot | grep -v libgcc > $1/depends-nokernel-nolibc.dot
 		grep -v update- $1/depends-nokernel-nolibc.dot > $1/depends-nokernel-nolibc-noupdate.dot
 		grep -v kernel-module $1/depends-nokernel-nolibc-noupdate.dot > $1/depends-nokernel-nolibc-noupdate-nomodules.dot
@@ -465,6 +497,7 @@
 		return
 	fi
 
+        mkdir -p ${BUILDHISTORY_DIR_IMAGE}
 	buildhistory_list_files ${IMAGE_ROOTFS} ${BUILDHISTORY_DIR_IMAGE}/files-in-image.txt
 
 	# Collect files requested in BUILDHISTORY_IMAGE_FILES
@@ -499,6 +532,15 @@
 
 	buildhistory_list_files ${SDK_OUTPUT} ${BUILDHISTORY_DIR_SDK}/files-in-sdk.txt
 
+	# Collect files requested in BUILDHISTORY_SDK_FILES
+	rm -rf ${BUILDHISTORY_DIR_SDK}/sdk-files
+	for f in ${BUILDHISTORY_SDK_FILES}; do
+		if [ -f ${SDK_OUTPUT}/${SDKPATH}/$f ] ; then
+			mkdir -p ${BUILDHISTORY_DIR_SDK}/sdk-files/`dirname $f`
+			cp ${SDK_OUTPUT}/${SDKPATH}/$f ${BUILDHISTORY_DIR_SDK}/sdk-files/$f
+		fi
+	done
+
 	# Record some machine-readable meta-information about the SDK
 	printf ""  > ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
 	cat >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt <<END
@@ -508,6 +550,30 @@
 	echo "SDKSIZE = $sdksize" >> ${BUILDHISTORY_DIR_SDK}/sdk-info.txt
 }
 
+python buildhistory_get_extra_sdkinfo() {
+    import operator
+    import math
+    if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+        tasksizes = {}
+        filesizes = {}
+        for root, _, files in os.walk(d.expand('${SDK_OUTPUT}/${SDKPATH}/sstate-cache')):
+            for fn in files:
+                if fn.endswith('.tgz'):
+                    fsize = int(math.ceil(float(os.path.getsize(os.path.join(root, fn))) / 1024))
+                    task = fn.rsplit(':', 1)[1].split('_', 1)[1].split('.')[0]
+                    origtotal = tasksizes.get(task, 0)
+                    tasksizes[task] = origtotal + fsize
+                    filesizes[fn] = fsize
+        with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-package-sizes.txt'), 'w') as f:
+            filesizes_sorted = sorted(filesizes.items(), key=operator.itemgetter(1), reverse=True)
+            for fn, size in filesizes_sorted:
+                f.write('%10d KiB %s\n' % (size, fn))
+        with open(d.expand('${BUILDHISTORY_DIR_SDK}/sstate-task-sizes.txt'), 'w') as f:
+            tasksizes_sorted = sorted(tasksizes.items(), key=operator.itemgetter(1), reverse=True)
+            for task, size in tasksizes_sorted:
+                f.write('%10d KiB %s\n' % (size, task))
+}
+
 # By using ROOTFS_POSTUNINSTALL_COMMAND we get in after uninstallation of
 # unneeded packages but before the removal of packaging files
 ROOTFS_POSTUNINSTALL_COMMAND += " buildhistory_list_installed_image ;\
@@ -516,12 +582,16 @@
 IMAGE_POSTPROCESS_COMMAND += " buildhistory_get_imageinfo ; "
 
 # We want these to be the last run so that we get called after complementary package installation
-POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target ;\
-                                            buildhistory_get_sdk_installed_target ; "
-POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host ;\
-                                          buildhistory_get_sdk_installed_host ; "
+POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_list_installed_sdk_target;"
+POPULATE_SDK_POST_TARGET_COMMAND_append = " buildhistory_get_sdk_installed_target;"
+POPULATE_SDK_POST_TARGET_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_target;| buildhistory_get_sdk_installed_target;"
 
-SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; "
+POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_list_installed_sdk_host;"
+POPULATE_SDK_POST_HOST_COMMAND_append = " buildhistory_get_sdk_installed_host;"
+POPULATE_SDK_POST_HOST_COMMAND[vardepvalueexclude] .= "| buildhistory_list_installed_sdk_host;| buildhistory_get_sdk_installed_host;"
+
+SDK_POSTPROCESS_COMMAND_append = " buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
+SDK_POSTPROCESS_COMMAND[vardepvalueexclude] .= "| buildhistory_get_sdkinfo ; buildhistory_get_extra_sdkinfo; "
 
 def buildhistory_get_build_id(d):
     if d.getVar('BB_WORKERCONTEXT', True) != '1':
@@ -573,7 +643,10 @@
     if d.getVar('BB_WORKERCONTEXT', True) != '1':
         return ""
     sdkvars = "DISTRO DISTRO_VERSION SDK_NAME SDK_VERSION SDKMACHINE SDKIMAGE_FEATURES BAD_RECOMMENDATIONS NO_RECOMMENDATIONS PACKAGE_EXCLUDE"
-    listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE"
+    if d.getVar('BB_CURRENTTASK', True) == 'populate_sdk_ext':
+        # Extensible SDK uses some additional variables
+        sdkvars += " SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST SDK_UPDATE_URL SDK_EXT_TYPE SDK_RECRDEP_TASKS"
+    listvars = "SDKIMAGE_FEATURES BAD_RECOMMENDATIONS PACKAGE_EXCLUDE SDK_LOCAL_CONF_WHITELIST SDK_LOCAL_CONF_BLACKLIST SDK_INHERIT_BLACKLIST"
     return outputvars(sdkvars, listvars, d)
 
 
@@ -634,7 +707,7 @@
 
 	( cd ${BUILDHISTORY_DIR}/
 		# Initialise the repo if necessary
-		if [ ! -d .git ] ; then
+		if [ ! -e .git ] ; then
 			git init -q
 		else
 			git tag -f build-minus-3 build-minus-2 > /dev/null 2>&1 || true
@@ -672,17 +745,35 @@
 
 python buildhistory_eventhandler() {
     if e.data.getVar('BUILDHISTORY_FEATURES', True).strip():
-        if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
-            bb.note("Writing buildhistory")
-            localdata = bb.data.createCopy(e.data)
-            localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
-            interrupted = getattr(e, '_interrupted', 0)
-            localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
-            bb.build.exec_func("buildhistory_commit", localdata)
+        reset = e.data.getVar("BUILDHISTORY_RESET", True)
+        olddir = e.data.getVar("BUILDHISTORY_OLD_DIR", True)
+        if isinstance(e, bb.event.BuildStarted):
+            if reset:
+                import shutil
+                # Clean up after a potentially interrupted build.
+                if os.path.isdir(olddir):
+                    shutil.rmtree(olddir)
+                rootdir = e.data.getVar("BUILDHISTORY_DIR", True)
+                entries = [ x for x in os.listdir(rootdir) if not x.startswith('.') ]
+                bb.utils.mkdirhier(olddir)
+                for entry in entries:
+                    os.rename(os.path.join(rootdir, entry),
+                              os.path.join(olddir, entry))
+        elif isinstance(e, bb.event.BuildCompleted):
+            if reset:
+                import shutil
+                shutil.rmtree(olddir)
+            if e.data.getVar("BUILDHISTORY_COMMIT", True) == "1":
+                bb.note("Writing buildhistory")
+                localdata = bb.data.createCopy(e.data)
+                localdata.setVar('BUILDHISTORY_BUILD_FAILURES', str(e._failures))
+                interrupted = getattr(e, '_interrupted', 0)
+                localdata.setVar('BUILDHISTORY_BUILD_INTERRUPTED', str(interrupted))
+                bb.build.exec_func("buildhistory_commit", localdata)
 }
 
 addhandler buildhistory_eventhandler
-buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted"
+buildhistory_eventhandler[eventmask] = "bb.event.BuildCompleted bb.event.BuildStarted"
 
 
 # FIXME this ought to be moved into the fetcher
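
The buildhistory_get_extra_sdkinfo function added above sizes the extensible SDK's sstate cache per task purely by parsing archive filenames: everything after the last ':' is "<hash>_<taskname>.tgz", so stripping the hash prefix and the extension recovers the task name. A minimal standalone sketch of that parsing, using a hypothetical archive name (real names are read from the sstate-cache directory packed into the SDK):

# Minimal sketch of the filename parsing in buildhistory_get_extra_sdkinfo.
# The archive name below is hypothetical; the class walks
# ${SDK_OUTPUT}/${SDKPATH}/sstate-cache for the real files.
fn = "sstate:zlib:core2-64-poky-linux:1.2.8:r0:core2-64:3:0123abcd_populate_sysroot.tgz"

# Everything after the last ':' is "<hash>_<taskname>.tgz"; drop the hash
# and the .tgz suffix to recover the task name, as the class does.
task = fn.rsplit(':', 1)[1].split('_', 1)[1].split('.')[0]
print(task)  # -> populate_sysroot
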
diff --git a/yocto-poky/meta/classes/buildstats-summary.bbclass b/yocto-poky/meta/classes/buildstats-summary.bbclass
index 05ead9f..d73350b 100644
--- a/yocto-poky/meta/classes/buildstats-summary.bbclass
+++ b/yocto-poky/meta/classes/buildstats-summary.bbclass
@@ -3,8 +3,7 @@
     import collections
     import os.path
 
-    bn = get_bn(e)
-    bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+    bsdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}")
     if not os.path.exists(bsdir):
         return
 
diff --git a/yocto-poky/meta/classes/buildstats.bbclass b/yocto-poky/meta/classes/buildstats.bbclass
index 22ec571..34ecb03 100644
--- a/yocto-poky/meta/classes/buildstats.bbclass
+++ b/yocto-poky/meta/classes/buildstats.bbclass
@@ -1,135 +1,72 @@
 BUILDSTATS_BASE = "${TMPDIR}/buildstats/"
-BUILDSTATS_BNFILE = "${BUILDSTATS_BASE}/.buildname"
-BUILDSTATS_DEVFILE = "${BUILDSTATS_BASE}/.device"
 
 ################################################################################
 # Build statistics gathering.
 #
 # The CPU and Time gathering/tracking functions and bbevent inspiration
-# were written by Christopher Larson and can be seen here:
-# http://kergoth.pastey.net/142813
+# were written by Christopher Larson.
 #
 ################################################################################
 
-def get_process_cputime(pid):
+def get_buildprocess_cputime(pid):
     with open("/proc/%d/stat" % pid, "r") as f:
         fields = f.readline().rstrip().split()
     # 13: utime, 14: stime, 15: cutime, 16: cstime
     return sum(int(field) for field in fields[13:16])
 
+def get_process_cputime(pid):
+    import resource
+    with open("/proc/%d/stat" % pid, "r") as f:
+        fields = f.readline().rstrip().split()
+    stats = { 
+        'utime'  : fields[13],
+        'stime'  : fields[14], 
+        'cutime' : fields[15], 
+        'cstime' : fields[16],  
+    }
+    iostats = {}
+    if os.path.isfile("/proc/%d/io" % pid):
+        with open("/proc/%d/io" % pid, "r") as f:
+            while True:
+                i = f.readline().strip()
+                if not i:
+                    break
+                i = i.split(": ")
+                iostats[i[0]] = i[1]
+    resources = resource.getrusage(resource.RUSAGE_SELF)
+    childres = resource.getrusage(resource.RUSAGE_CHILDREN)
+    return stats, iostats, resources, childres
+
 def get_cputime():
     with open("/proc/stat", "r") as f:
         fields = f.readline().rstrip().split()[1:]
     return sum(int(field) for field in fields)
 
-def set_bn(e):
-    bn = e.getPkgs()[0] + "-" + e.data.getVar('MACHINE', True)
-    try:
-        os.remove(e.data.getVar('BUILDSTATS_BNFILE', True))
-    except:
-        pass
-    with open(e.data.getVar('BUILDSTATS_BNFILE', True), "w") as f:
-        f.write(os.path.join(bn, e.data.getVar('BUILDNAME', True)))
+def set_timedata(var, d, server_time):
+    d.setVar(var, server_time)
 
-def get_bn(e):
-    with open(e.data.getVar('BUILDSTATS_BNFILE', True)) as f:
-        bn = f.readline()
-    return bn
-
-def set_device(e):
-    tmpdir = e.data.getVar('TMPDIR', True)
-    devfile = e.data.getVar('BUILDSTATS_DEVFILE', True)
-    try:
-        os.remove(devfile)
-    except:
-        pass
-    ############################################################################
-    # We look for the volume TMPDIR lives on. To do all disks would make little
-    # sense and not give us any particularly useful data. In theory we could do
-    # something like stick DL_DIR on a different partition and this would
-    # throw stats gathering off. The same goes with SSTATE_DIR. However, let's
-    # get the basics in here and work on the cornercases later.
-    # A note. /proc/diskstats does not contain info on encryptfs, tmpfs, etc.
-    # If we end up hitting one of these fs, we'll just skip diskstats collection.
-    ############################################################################
-    device = os.stat(tmpdir)
-    majordev = os.major(long(device.st_dev))
-    minordev = os.minor(long(device.st_dev))
-    ############################################################################
-    # Bug 1700:
-    # Because tmpfs/encryptfs/ramfs etc inserts no entry in /proc/diskstats
-    # we set rdev to NoLogicalDevice and search for it later. If we find NLD
-    # we do not collect diskstats as the method to collect meaningful statistics
-    # for these fs types requires a bit more research.
-    ############################################################################
-    rdev = "NoLogicalDevice"
-    try:
-        with open("/proc/diskstats", "r") as f:
-            for line in f:
-                if majordev == int(line.split()[0]) and minordev == int(line.split()[1]):
-                    rdev = line.split()[2]
-    except:
-        pass
-    with open(devfile, "w") as f:
-        f.write(rdev)
-
-def get_device(e):
-    with open(e.data.getVar('BUILDSTATS_DEVFILE', True)) as f:
-        device = f.readline()
-    return device
-
-def get_diskstats(dev):
-    import itertools
-    ############################################################################
-    # For info on what these are, see kernel doc file iostats.txt
-    ############################################################################
-    DSTAT_KEYS = ['ReadsComp', 'ReadsMerged', 'SectRead', 'TimeReads', 'WritesComp', 'SectWrite', 'TimeWrite', 'IOinProgress', 'TimeIO', 'WTimeIO']
-    try:
-        with open("/proc/diskstats", "r") as f:
-            for x in f:
-                if dev in x:
-                    diskstats_val = x.rstrip().split()[4:]
-    except IOError as e:
+def get_timedata(var, d, end_time):
+    oldtime = d.getVar(var, False)
+    if oldtime is None:
         return
-    diskstats = dict(itertools.izip(DSTAT_KEYS, diskstats_val))
-    return diskstats
+    return end_time - oldtime
 
-def set_diskdata(var, dev, data):
-    data.setVar(var, get_diskstats(dev))
-
-def get_diskdata(var, dev, data):
-    olddiskdata = data.getVar(var, False)
-    diskdata = {}
-    if olddiskdata is None:
-        return
-    newdiskdata = get_diskstats(dev)
-    for key in olddiskdata.iterkeys():
-        diskdata["Start"+key] = str(int(olddiskdata[key]))
-        diskdata["End"+key] = str(int(newdiskdata[key]))
-    return diskdata
-
-def set_timedata(var, data, server_time=None):
+def set_buildtimedata(var, d):
     import time
-    if server_time:
-        time = server_time
-    else:
-        time = time.time()
+    time = time.time()
     cputime = get_cputime()
-    proctime = get_process_cputime(os.getpid())
-    data.setVar(var, (time, cputime, proctime))
+    proctime = get_buildprocess_cputime(os.getpid())
+    d.setVar(var, (time, cputime, proctime))
 
-def get_timedata(var, data, server_time=None):
+def get_buildtimedata(var, d):
     import time
-    timedata = data.getVar(var, False)
+    timedata = d.getVar(var, False)
     if timedata is None:
         return
     oldtime, oldcpu, oldproc = timedata
-    procdiff = get_process_cputime(os.getpid()) - oldproc
+    procdiff = get_buildprocess_cputime(os.getpid()) - oldproc
     cpudiff = get_cputime() - oldcpu
-    if server_time:
-        end_time = server_time
-    else:
-        end_time = time.time()
+    end_time = time.time()
     timediff = end_time - oldtime
     if cpudiff > 0:
         cpuperc = float(procdiff) * 100 / cpudiff
@@ -137,30 +74,27 @@
         cpuperc = None
     return timediff, cpuperc
 
-def write_task_data(status, logfile, dev, e):
-    bn = get_bn(e)
-    bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
+def write_task_data(status, logfile, e, d):
+    bn = d.getVar('BUILDNAME', True)
+    bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
     with open(os.path.join(logfile), "a") as f:
-        timedata = get_timedata("__timedata_task", e.data, e.time)
-        if timedata:
-            elapsedtime, cpu = timedata
-            f.write(bb.data.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
-                                    (e.task, elapsedtime), e.data))
+        elapsedtime = get_timedata("__timedata_task", d, e.time)
+        if elapsedtime:
+            f.write(d.expand("${PF}: %s: Elapsed time: %0.2f seconds \n" %
+                                    (e.task, elapsedtime)))
+            cpu, iostats, resources, childres = get_process_cputime(os.getpid())
             if cpu:
-                f.write("CPU usage: %0.1f%% \n" % cpu)
-        ############################################################################
-        # Here we gather up disk data. In an effort to avoid lying with stats
-        # I do a bare minimum of analysis of collected data.
-        # The simple fact is, doing disk io collection on a per process basis
-        # without effecting build time would be difficult.
-        # For the best information, running things with BB_TOTAL_THREADS = "1"
-        # would return accurate per task results.
-        ############################################################################
-        if dev != "NoLogicalDevice":
-            diskdata = get_diskdata("__diskdata_task", dev, e.data)
-            if diskdata:
-                for key in sorted(diskdata.iterkeys()):
-                    f.write(key + ": " + diskdata[key] + "\n")
+                f.write("utime: %s\n" % cpu['utime'])
+                f.write("stime: %s\n" % cpu['stime'])
+                f.write("cutime: %s\n" % cpu['cutime'])
+                f.write("cstime: %s\n" % cpu['cstime'])
+            for i in iostats:
+                f.write("IO %s: %s\n" % (i, iostats[i]))
+            rusages = ["ru_utime", "ru_stime", "ru_maxrss", "ru_minflt", "ru_majflt", "ru_inblock", "ru_oublock", "ru_nvcsw", "ru_nivcsw"]
+            for i in rusages:
+                f.write("rusage %s: %s\n" % (i, getattr(resources, i)))
+            for i in rusages:
+                f.write("Child rusage %s: %s\n" % (i, getattr(childres, i)))
         if status is "passed":
             f.write("Status: PASSED \n")
         else:
@@ -170,25 +104,26 @@
 python run_buildstats () {
     import bb.build
     import bb.event
-    import bb.data
     import time, subprocess, platform
 
+    bn = d.getVar('BUILDNAME', True)
+    bsdir = os.path.join(d.getVar('BUILDSTATS_BASE', True), bn)
+    taskdir = os.path.join(bsdir, d.getVar('PF', True))
+
     if isinstance(e, bb.event.BuildStarted):
         ########################################################################
-        # at first pass make the buildstats heriarchy and then
+        # If the kernel was not configured to provide I/O statistics, issue
+        # a one-time warning.
+        ########################################################################
+        if not os.path.isfile("/proc/%d/io" % os.getpid()):
+            bb.warn("The Linux kernel on your build host was not configured to provide process I/O statistics. (CONFIG_TASK_IO_ACCOUNTING is not set)")
+
+        ########################################################################
+        # at first pass make the buildstats hierarchy and then
         # set the buildname
         ########################################################################
-        bb.utils.mkdirhier(e.data.getVar('BUILDSTATS_BASE', True))
-        set_bn(e)
-        bn = get_bn(e)
-        set_device(e)
-        device = get_device(e)
-
-        bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
         bb.utils.mkdirhier(bsdir)
-        if device != "NoLogicalDevice":
-            set_diskdata("__diskdata_build", device, e.data)
-        set_timedata("__timedata_build", e.data)
+        set_buildtimedata("__timedata_build", d)
         build_time = os.path.join(bsdir, "build_stats")
         # write start of build into build_time
         with open(build_time, "a") as f:
@@ -201,35 +136,21 @@
             f.write("Build Started: %0.2f \n" % time.time())
 
     elif isinstance(e, bb.event.BuildCompleted):
-        bn = get_bn(e)
-        device = get_device(e)
-        bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
         build_time = os.path.join(bsdir, "build_stats")
         with open(build_time, "a") as f:
             ########################################################################
             # Write build statistics for the build
             ########################################################################
-            timedata = get_timedata("__timedata_build", e.data)
+            timedata = get_buildtimedata("__timedata_build", d)
             if timedata:
                 time, cpu = timedata
                 # write end of build and cpu used into build_time
                 f.write("Elapsed time: %0.2f seconds \n" % (time))
                 if cpu:
                     f.write("CPU usage: %0.1f%% \n" % cpu)
-            if device != "NoLogicalDevice":
-                diskio = get_diskdata("__diskdata_build", device, e.data)
-                if diskio:
-                    for key in sorted(diskio.iterkeys()):
-                        f.write(key + ": " + diskio[key] + "\n")
 
     if isinstance(e, bb.build.TaskStarted):
-        bn = get_bn(e)
-        device = get_device(e)
-        bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
-        taskdir = os.path.join(bsdir, e.data.getVar('PF', True))
-        if device != "NoLogicalDevice":
-            set_diskdata("__diskdata_task", device, e.data)
-        set_timedata("__timedata_task", e.data, e.time)
+        set_timedata("__timedata_task", d, e.time)
         bb.utils.mkdirhier(taskdir)
         # write into the task event file the name and start time
         with open(os.path.join(taskdir, e.task), "a") as f:
@@ -237,24 +158,18 @@
             f.write("Started: %0.2f \n" % e.time)
 
     elif isinstance(e, bb.build.TaskSucceeded):
-        bn = get_bn(e)
-        device = get_device(e)
-        bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
-        taskdir = os.path.join(bsdir, e.data.getVar('PF', True))
-        write_task_data("passed", os.path.join(taskdir, e.task), device, e)
+        write_task_data("passed", os.path.join(taskdir, e.task), e, d)
         if e.task == "do_rootfs":
             bs = os.path.join(bsdir, "build_stats")
             with open(bs, "a") as f:
-                rootfs = e.data.getVar('IMAGE_ROOTFS', True)
+                rootfs = d.getVar('IMAGE_ROOTFS', True)
                 rootfs_size = subprocess.Popen(["du", "-sh", rootfs], stdout=subprocess.PIPE).stdout.read()
                 f.write("Uncompressed Rootfs size: %s" % rootfs_size)
 
     elif isinstance(e, bb.build.TaskFailed):
-        bn = get_bn(e)
-        device = get_device(e)
-        bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
-        taskdir = os.path.join(bsdir, e.data.getVar('PF', True))
-        write_task_data("failed", os.path.join(taskdir, e.task), device, e)
+        # Can have a failure before TaskStarted so need to mkdir here too
+        bb.utils.mkdirhier(taskdir)
+        write_task_data("failed", os.path.join(taskdir, e.task), e, d)
         ########################################################################
         # Lets make things easier and tell people where the build failed in
         # build_status. We do this here because BuildCompleted triggers no
@@ -262,7 +177,7 @@
         ########################################################################
         build_status = os.path.join(bsdir, "build_stats")
         with open(build_status, "a") as f:
-            f.write(e.data.expand("Failed at: ${PF} at task: %s \n" % e.task))
+            f.write(d.expand("Failed at: ${PF} at task: %s \n" % e.task))
 }
 
 addhandler run_buildstats
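
The rewritten buildstats code above drops the old per-device /proc/diskstats sampling in favour of per-process counters: utime, stime, cutime and cstime come from /proc/<pid>/stat (indices 13-16 of the split line, in clock ticks), and I/O counters come from /proc/<pid>/io when the host kernel has CONFIG_TASK_IO_ACCOUNTING enabled, hence the one-time warning at BuildStarted. A minimal standalone sketch of that parsing (run outside BitBake it simply samples the current process):

# Minimal standalone sketch of the /proc parsing used by the new buildstats
# code; inside the class the same reads happen in get_buildprocess_cputime
# and get_process_cputime.
import os

def read_cputime(pid):
    with open("/proc/%d/stat" % pid, "r") as f:
        fields = f.readline().rstrip().split()
    # Summing indices 13..15 (utime + stime + cutime, in clock ticks)
    # mirrors the fields[13:16] summation in get_buildprocess_cputime.
    return sum(int(field) for field in fields[13:16])

def read_iostats(pid):
    # /proc/<pid>/io is only present with CONFIG_TASK_IO_ACCOUNTING.
    iostats = {}
    path = "/proc/%d/io" % pid
    if os.path.isfile(path):
        with open(path, "r") as f:
            for line in f:
                key, value = line.strip().split(": ", 1)
                iostats[key] = value
    return iostats

pid = os.getpid()
print(read_cputime(pid), read_iostats(pid).get("read_bytes"))
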
diff --git a/yocto-poky/meta/classes/chrpath.bbclass b/yocto-poky/meta/classes/chrpath.bbclass
index e9160af..9c68855 100644
--- a/yocto-poky/meta/classes/chrpath.bbclass
+++ b/yocto-poky/meta/classes/chrpath.bbclass
@@ -6,7 +6,7 @@
 
     p = sub.Popen([cmd, '-l', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
     err, out = p.communicate()
-    # If returned succesfully, process stderr for results
+    # If returned successfully, process stderr for results
     if p.returncode != 0:
         return
 
@@ -45,7 +45,7 @@
 
     p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', fpath],stdout=sub.PIPE,stderr=sub.PIPE)
     err, out = p.communicate()
-    # If returned succesfully, process stderr for results
+    # If returned successfully, process stderr for results
     if p.returncode != 0:
         return
     for l in err.split("\n"):
diff --git a/yocto-poky/meta/classes/cmake.bbclass b/yocto-poky/meta/classes/cmake.bbclass
index ae3cc02..02f313a 100644
--- a/yocto-poky/meta/classes/cmake.bbclass
+++ b/yocto-poky/meta/classes/cmake.bbclass
@@ -7,9 +7,6 @@
 # We need to unset CCACHE otherwise cmake gets too confused
 CCACHE = ""
 
-# We want the staging and installing functions from autotools
-inherit autotools
-
 # C/C++ Compiler (without cpu arch/tune arguments)
 OECMAKE_C_COMPILER ?= "`echo ${CC} | sed 's/^\([^ ]*\).*/\1/'`"
 OECMAKE_CXX_COMPILER ?= "`echo ${CXX} | sed 's/^\([^ ]*\).*/\1/'`"
@@ -18,8 +15,8 @@
 # Compiler flags
 OECMAKE_C_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CFLAGS}"
 OECMAKE_CXX_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS}"
-OECMAKE_C_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CFLAGS} -DNDEBUG"
-OECMAKE_CXX_FLAGS_RELEASE ?= "${SELECTED_OPTIMIZATION} ${CXXFLAGS} -DNDEBUG"
+OECMAKE_C_FLAGS_RELEASE ?= "-DNDEBUG"
+OECMAKE_CXX_FLAGS_RELEASE ?= "-DNDEBUG"
 OECMAKE_C_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CPPFLAGS} ${LDFLAGS}"
 OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LDFLAGS}"
 
@@ -30,6 +27,8 @@
 OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM = "ONLY"
 OECMAKE_FIND_ROOT_PATH_MODE_PROGRAM_class-native = "BOTH"
 
+EXTRA_OECMAKE_append = " ${PACKAGECONFIG_CONFARGS}"
+
 # CMake expects target architectures in the format of uname(2),
 # which do not always match TARGET_ARCH, so all the necessary
 # conversions should happen here.
@@ -53,9 +52,9 @@
 set( CMAKE_C_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "CFLAGS" )
 set( CMAKE_CXX_FLAGS "${OECMAKE_CXX_FLAGS}" CACHE STRING "CXXFLAGS" )
 set( CMAKE_ASM_FLAGS "${OECMAKE_C_FLAGS}" CACHE STRING "ASM FLAGS" )
-set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "CFLAGS for release" )
-set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "CXXFLAGS for release" )
-set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "ASM FLAGS for release" )
+set( CMAKE_C_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional CFLAGS for release" )
+set( CMAKE_CXX_FLAGS_RELEASE "${OECMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Additional CXXFLAGS for release" )
+set( CMAKE_ASM_FLAGS_RELEASE "${OECMAKE_C_FLAGS_RELEASE}" CACHE STRING "Additional ASM FLAGS for release" )
 set( CMAKE_C_LINK_FLAGS "${OECMAKE_C_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
 set( CMAKE_CXX_LINK_FLAGS "${OECMAKE_CXX_LINK_FLAGS}" CACHE STRING "LDFLAGS" )
 
@@ -132,7 +131,7 @@
 
 cmake_do_install() {
 	cd ${B}
-	autotools_do_install
+	oe_runmake 'DESTDIR=${D}' install
 }
 
 EXPORT_FUNCTIONS do_configure do_compile do_install do_generate_toolchain_file
diff --git a/yocto-poky/meta/classes/cml1.bbclass b/yocto-poky/meta/classes/cml1.bbclass
index 95cf584..b5dc028 100644
--- a/yocto-poky/meta/classes/cml1.bbclass
+++ b/yocto-poky/meta/classes/cml1.bbclass
@@ -26,7 +26,8 @@
     except OSError:
         mtime = 0
 
-    oe_terminal("${SHELL} -c \"make ${KCONFIG_CONFIG_COMMAND}; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"", '${PN} Configuration', d)
+    oe_terminal("${SHELL} -c \"make %s; if [ \$? -ne 0 ]; then echo 'Command failed.'; printf 'Press any key to continue... '; read r; fi\"" % d.getVar('KCONFIG_CONFIG_COMMAND', True),
+                d.getVar('PN', True ) + ' Configuration', d)
 
     # FIXME this check can be removed when the minimum bitbake version has been bumped
     if hasattr(bb.build, 'write_taint'):
diff --git a/yocto-poky/meta/classes/compress_doc.bbclass b/yocto-poky/meta/classes/compress_doc.bbclass
index 9b58d82..8073c17 100644
--- a/yocto-poky/meta/classes/compress_doc.bbclass
+++ b/yocto-poky/meta/classes/compress_doc.bbclass
@@ -40,8 +40,8 @@
     compress_cmds = {}
     decompress_cmds = {}
     for mode in compress_list:
-        compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode)
-        decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode)
+        compress_cmds[mode] = d.getVarFlag('DOC_COMPRESS_CMD', mode, True)
+        decompress_cmds[mode] = d.getVarFlag('DOC_DECOMPRESS_CMD', mode, True)
 
     mandir = os.path.abspath(dvar + os.sep + d.getVar("mandir", True))
     if os.path.exists(mandir):
diff --git a/yocto-poky/meta/classes/core-image.bbclass b/yocto-poky/meta/classes/core-image.bbclass
index 8e340d9..705cad8 100644
--- a/yocto-poky/meta/classes/core-image.bbclass
+++ b/yocto-poky/meta/classes/core-image.bbclass
@@ -24,7 +24,6 @@
 # - nfs-client          - NFS client
 # - ssh-server-dropbear - SSH server (dropbear)
 # - ssh-server-openssh  - SSH server (openssh)
-# - qt4-pkgs            - Qt4/X11 and demo applications
 # - hwcodecs            - Install hardware acceleration codecs
 # - package-management  - installs package management tools and preserves the package manager database
 # - debug-tweaks        - makes an image suitable for development, e.g. allowing passwordless root logins
@@ -46,7 +45,6 @@
 FEATURE_PACKAGES_nfs-client = "packagegroup-core-nfs-client"
 FEATURE_PACKAGES_ssh-server-dropbear = "packagegroup-core-ssh-dropbear"
 FEATURE_PACKAGES_ssh-server-openssh = "packagegroup-core-ssh-openssh"
-FEATURE_PACKAGES_qt4-pkgs = "packagegroup-core-qt-demoapps"
 FEATURE_PACKAGES_hwcodecs = "${MACHINE_HWCODECS}"
 
 
diff --git a/yocto-poky/meta/classes/cpan-base.bbclass b/yocto-poky/meta/classes/cpan-base.bbclass
index 7810a4d..55ac052 100644
--- a/yocto-poky/meta/classes/cpan-base.bbclass
+++ b/yocto-poky/meta/classes/cpan-base.bbclass
@@ -29,31 +29,12 @@
             return m.group(1)
     return None
 
-# Determine where the library directories are
-def perl_get_libdirs(d):
-    libdir = d.getVar('libdir', True)
-    if is_target(d) == "no":
-        libdir += '/perl-native'
-    libdir += '/perl'
-    return libdir
-
 def is_target(d):
     if not bb.data.inherits_class('native', d):
         return "yes"
     return "no"
 
-PERLLIBDIRS := "${@perl_get_libdirs(d)}"
+PERLLIBDIRS = "${libdir}/perl"
+PERLLIBDIRS_class-native = "${libdir}/perl-native"
 PERLVERSION := "${@get_perl_version(d)}"
 PERLVERSION[vardepvalue] = ""
-
-FILES_${PN}-dbg += "${PERLLIBDIRS}/auto/*/.debug \
-                    ${PERLLIBDIRS}/auto/*/*/.debug \
-                    ${PERLLIBDIRS}/auto/*/*/*/.debug \
-                    ${PERLLIBDIRS}/auto/*/*/*/*/.debug \
-                    ${PERLLIBDIRS}/auto/*/*/*/*/*/.debug \
-                    ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/.debug \
-                    ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/.debug \
-                    ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/.debug \
-                    ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/*/.debug \
-                    ${PERLLIBDIRS}/vendor_perl/${PERLVERSION}/auto/*/*/*/*/*/.debug \
-                    "
diff --git a/yocto-poky/meta/classes/cross-canadian.bbclass b/yocto-poky/meta/classes/cross-canadian.bbclass
index ea17f09..e07b1bd 100644
--- a/yocto-poky/meta/classes/cross-canadian.bbclass
+++ b/yocto-poky/meta/classes/cross-canadian.bbclass
@@ -103,7 +103,7 @@
 HOST_AS_ARCH = "${SDK_AS_ARCH}"
 
 #assign DPKG_ARCH
-DPKG_ARCH = "${SDK_ARCH}"
+DPKG_ARCH = "${@debian_arch_map(d.getVar('SDK_ARCH', True), '')}"
 
 CPPFLAGS = "${BUILDSDK_CPPFLAGS}"
 CFLAGS = "${BUILDSDK_CFLAGS}"
@@ -143,9 +143,6 @@
 libexecdir = "${exec_prefix}/libexec/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
 
 FILES_${PN} = "${prefix}"
-FILES_${PN}-dbg += "${prefix}/.debug \
-                    ${prefix}/bin/.debug \
-                   "
 
 export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
 export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
diff --git a/yocto-poky/meta/classes/cross.bbclass b/yocto-poky/meta/classes/cross.bbclass
index 3eab5b9..81d1c9d 100644
--- a/yocto-poky/meta/classes/cross.bbclass
+++ b/yocto-poky/meta/classes/cross.bbclass
@@ -68,10 +68,4 @@
 
 USE_NLS = "no"
 
-deltask package
-deltask packagedata
-deltask package_qa
-deltask package_write_ipk
-deltask package_write_deb
-deltask package_write_rpm
-deltask package_write
+inherit nopackages
diff --git a/yocto-poky/meta/classes/crosssdk.bbclass b/yocto-poky/meta/classes/crosssdk.bbclass
index 87d5cf5..7315c38 100644
--- a/yocto-poky/meta/classes/crosssdk.bbclass
+++ b/yocto-poky/meta/classes/crosssdk.bbclass
@@ -30,7 +30,7 @@
 do_populate_sysroot[stamp-extra-info] = ""
 do_packagedata[stamp-extra-info] = ""
 
-# Need to force this to ensure consitency accross architectures
+# Need to force this to ensure consistency across architectures
 EXTRA_OECONF_GCC_FLOAT = ""
 
 USE_NLS = "no"
diff --git a/yocto-poky/meta/classes/debian.bbclass b/yocto-poky/meta/classes/debian.bbclass
index 1b6979a..be7cacc 100644
--- a/yocto-poky/meta/classes/debian.bbclass
+++ b/yocto-poky/meta/classes/debian.bbclass
@@ -53,7 +53,7 @@
         return (s[stat.ST_MODE] & stat.S_IEXEC)
 
     def add_rprovides(pkg, d):
-        newpkg = d.getVar('PKG_' + pkg, False)
+        newpkg = d.getVar('PKG_' + pkg, True)
         if newpkg and newpkg != pkg:
             provs = (d.getVar('RPROVIDES_' + pkg, True) or "").split()
             if pkg not in provs:
diff --git a/yocto-poky/meta/classes/devshell.bbclass b/yocto-poky/meta/classes/devshell.bbclass
index 4451436..341d9c0 100644
--- a/yocto-poky/meta/classes/devshell.bbclass
+++ b/yocto-poky/meta/classes/devshell.bbclass
@@ -3,7 +3,7 @@
 DEVSHELL = "${SHELL}"
 
 python do_devshell () {
-    if d.getVarFlag("do_devshell", "manualfakeroot"):
+    if d.getVarFlag("do_devshell", "manualfakeroot", True):
        d.prependVar("DEVSHELL", "pseudo ")
        fakeenv = d.getVar("FAKEROOTENV", True).split()
        for f in fakeenv:
@@ -27,7 +27,7 @@
 # be done as the normal user. We therefore carefully construct the environment
 # manually
 python () {
-    if d.getVarFlag("do_devshell", "fakeroot"):
+    if d.getVarFlag("do_devshell", "fakeroot", True):
        # We need to signal our code that we want fakeroot however we
        # can't manipulate the environment and variables here yet (see YOCTO #4795)
        d.setVarFlag("do_devshell", "manualfakeroot", "1")
diff --git a/yocto-poky/meta/classes/distrodata.bbclass b/yocto-poky/meta/classes/distrodata.bbclass
index 44c06e1..51bfc1e 100644
--- a/yocto-poky/meta/classes/distrodata.bbclass
+++ b/yocto-poky/meta/classes/distrodata.bbclass
@@ -104,6 +104,7 @@
             line = line + "," + i
         bb.note("%s\n" % line)
 }
+do_distrodata_np[vardepsexclude] = "DATETIME"
 
 addtask distrodata
 do_distrodata[nostamp] = "1"
@@ -196,6 +197,7 @@
             f.close()
         bb.utils.unlockfile(lf)
 }
+do_distrodata[vardepsexclude] = "DATETIME"
 
 addtask distrodataall after do_distrodata
 do_distrodataall[recrdeptask] = "do_distrodataall do_distrodata"
diff --git a/yocto-poky/meta/classes/distutils-common-base.bbclass b/yocto-poky/meta/classes/distutils-common-base.bbclass
index 427275b..08511f5 100644
--- a/yocto-poky/meta/classes/distutils-common-base.bbclass
+++ b/yocto-poky/meta/classes/distutils-common-base.bbclass
@@ -5,8 +5,6 @@
 export STAGING_INCDIR
 export STAGING_LIBDIR
 
-PACKAGES = "${PN}-staticdev ${PN}-dev ${PN}-dbg ${PN}-doc ${PN}"
-
 FILES_${PN} += "${libdir}/* ${libdir}/${PYTHON_DIR}/*"
 
 FILES_${PN}-staticdev += "\
@@ -17,8 +15,3 @@
   ${libdir}/pkgconfig \
   ${PYTHON_SITEPACKAGES_DIR}/*.la \
 "
-FILES_${PN}-dbg += "\
-  ${PYTHON_SITEPACKAGES_DIR}/.debug \
-  ${PYTHON_SITEPACKAGES_DIR}/*/.debug \
-  ${PYTHON_SITEPACKAGES_DIR}/*/*/.debug \
-"
diff --git a/yocto-poky/meta/classes/distutils-tools.bbclass b/yocto-poky/meta/classes/distutils-tools.bbclass
index f43450e..8d9b3f7 100644
--- a/yocto-poky/meta/classes/distutils-tools.bbclass
+++ b/yocto-poky/meta/classes/distutils-tools.bbclass
@@ -10,14 +10,14 @@
          STAGING_LIBDIR=${STAGING_LIBDIR} \
          BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
          ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
-         bbfatal "${PYTHON_PN} setup.py build_ext execution failed."
+         bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
 }
 
 distutils_stage_headers() {
         install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
 }
 
 distutils_stage_all() {
@@ -27,7 +27,7 @@
         PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
 }
 
 distutils_do_install() {
@@ -39,7 +39,7 @@
         PYTHONPATH=${D}/${PYTHON_SITEPACKAGES_DIR} \
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install execution failed."
 
         echo "Step 3 of ${PN} Install ..."
         # support filenames with *spaces*
diff --git a/yocto-poky/meta/classes/distutils.bbclass b/yocto-poky/meta/classes/distutils.bbclass
index cd06713..da48a2e 100644
--- a/yocto-poky/meta/classes/distutils.bbclass
+++ b/yocto-poky/meta/classes/distutils.bbclass
@@ -12,14 +12,14 @@
          STAGING_LIBDIR=${STAGING_LIBDIR} \
          BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
          ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py build ${DISTUTILS_BUILD_ARGS} || \
-         bbfatal "${PYTHON_PN} setup.py build execution failed."
+         bbfatal_log "${PYTHON_PN} setup.py build execution failed."
 }
 
 distutils_stage_headers() {
         install -d ${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR}
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
 }
 
 distutils_stage_all() {
@@ -29,7 +29,7 @@
         PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
 }
 
 distutils_do_install() {
@@ -39,7 +39,7 @@
         PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${HOST_SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install execution failed."
 
         # support filenames with *spaces*
         # only modify file if it contains path  and recompile it
diff --git a/yocto-poky/meta/classes/distutils3-base.bbclass b/yocto-poky/meta/classes/distutils3-base.bbclass
index af3aa00..2a093d3 100644
--- a/yocto-poky/meta/classes/distutils3-base.bbclass
+++ b/yocto-poky/meta/classes/distutils3-base.bbclass
@@ -1,7 +1,7 @@
 DEPENDS  += "${@["${PYTHON_PN}-native ${PYTHON_PN}", ""][(d.getVar('PACKAGES', True) == '')]}"
 RDEPENDS_${PN} += "${@['', '${PYTHON_PN}-core']['${CLASSOVERRIDE}' == 'class-target']}"
 
-PYTHON_BASEVERSION = "3.4"
+PYTHON_BASEVERSION = "3.5"
 PYTHON_ABI = "m"
 
 inherit distutils-common-base python3native
diff --git a/yocto-poky/meta/classes/distutils3-native-base.bbclass b/yocto-poky/meta/classes/distutils3-native-base.bbclass
index 1117101..db9a1a7 100644
--- a/yocto-poky/meta/classes/distutils3-native-base.bbclass
+++ b/yocto-poky/meta/classes/distutils3-native-base.bbclass
@@ -1,4 +1,4 @@
-PYTHON_BASEVERSION = "3.4"
+PYTHON_BASEVERSION = "3.5"
 PYTHON_ABI = "m"
 
 inherit distutils-native-base
diff --git a/yocto-poky/meta/classes/distutils3.bbclass b/yocto-poky/meta/classes/distutils3.bbclass
index 443bf3a..4f6ca44 100644
--- a/yocto-poky/meta/classes/distutils3.bbclass
+++ b/yocto-poky/meta/classes/distutils3.bbclass
@@ -19,7 +19,7 @@
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py \
         build ${DISTUTILS_BUILD_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py build_ext execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py build_ext execution failed."
 }
 distutils3_do_compile[vardepsexclude] = "MACHINE"
 
@@ -32,7 +32,7 @@
         fi
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install_headers ${DISTUTILS_STAGE_HEADERS_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install_headers execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install_headers execution failed."
 }
 distutils3_stage_headers[vardepsexclude] = "MACHINE"
 
@@ -48,7 +48,7 @@
         PYTHONPATH=${STAGING_DIR_HOST}${PYTHON_SITEPACKAGES_DIR} \
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install ${DISTUTILS_STAGE_ALL_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install (stage) execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install (stage) execution failed."
 }
 distutils3_stage_all[vardepsexclude] = "MACHINE"
 
@@ -64,7 +64,7 @@
         PYTHONPATH=${D}${PYTHON_SITEPACKAGES_DIR} \
         BUILD_SYS=${BUILD_SYS} HOST_SYS=${SYS} \
         ${STAGING_BINDIR_NATIVE}/${PYTHON_PN}-native/${PYTHON_PN} setup.py install --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${DISTUTILS_INSTALL_ARGS} || \
-        bbfatal "${PYTHON_PN} setup.py install execution failed."
+        bbfatal_log "${PYTHON_PN} setup.py install execution failed."
 
         # support filenames with *spaces*
         find ${D} -name "*.py" -exec grep -q ${D} {} \; -exec sed -i -e s:${D}::g {} \;
diff --git a/yocto-poky/meta/classes/externalsrc.bbclass b/yocto-poky/meta/classes/externalsrc.bbclass
index f7ed66d..da7eb47 100644
--- a/yocto-poky/meta/classes/externalsrc.bbclass
+++ b/yocto-poky/meta/classes/externalsrc.bbclass
@@ -25,6 +25,7 @@
 #
 
 SRCTREECOVEREDTASKS ?= "do_patch do_unpack do_fetch"
+EXTERNALSRC_SYMLINKS ?= "oe-workdir:${WORKDIR} oe-logs:${T}"
 
 python () {
     externalsrc = d.getVar('EXTERNALSRC', True)
@@ -51,7 +52,7 @@
             # Dummy value because the default function can't be called with blank SRC_URI
             d.setVar('SRCPV', '999')
 
-        tasks = filter(lambda k: d.getVarFlag(k, "task"), d.keys())
+        tasks = filter(lambda k: d.getVarFlag(k, "task", True), d.keys())
 
         for task in tasks:
             if task.endswith("_setscene"):
@@ -82,12 +83,72 @@
             bb.build.deltask(task, d)
 
         d.prependVarFlag('do_compile', 'prefuncs', "externalsrc_compile_prefunc ")
+        d.prependVarFlag('do_configure', 'prefuncs', "externalsrc_configure_prefunc ")
 
-        # Ensure compilation happens every time
-        d.setVarFlag('do_compile', 'nostamp', '1')
+        # Force the recipe to be always re-parsed so that the file_checksums
+        # function is run every time
+        d.setVar('BB_DONT_CACHE', '1')
+        d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
+
+        # We don't want the workdir to go away
+        d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
+
+        # If B=S the same builddir is used even for different architectures.
+        # Thus, use a shared CONFIGURESTAMPFILE and STAMP directory so that
+        # change of do_configure task hash is correctly detected and stamps are
+        # invalidated if e.g. MACHINE changes.
+        if d.getVar('S', True) == d.getVar('B', True):
+            configstamp = '${TMPDIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}/configure.sstate'
+            d.setVar('CONFIGURESTAMPFILE', configstamp)
+            d.setVar('STAMP', '${STAMPS_DIR}/work-shared/${PN}/${EXTENDPE}${PV}-${PR}')
+}
+
+python externalsrc_configure_prefunc() {
+    # Create desired symlinks
+    symlinks = (d.getVar('EXTERNALSRC_SYMLINKS', True) or '').split()
+    for symlink in symlinks:
+        symsplit = symlink.split(':', 1)
+        lnkfile = os.path.join(d.getVar('S', True), symsplit[0])
+        target = d.expand(symsplit[1])
+        if len(symsplit) > 1:
+            if os.path.islink(lnkfile):
+                # Link already exists, leave it if it points to the right location already
+                if os.readlink(lnkfile) == target:
+                    continue
+                os.unlink(lnkfile)
+            elif os.path.exists(lnkfile):
+                # File/dir exists with same name as link, just leave it alone
+                continue
+            os.symlink(target, lnkfile)
 }
 
 python externalsrc_compile_prefunc() {
     # Make it obvious that this is happening, since forgetting about it could lead to much confusion
     bb.plain('NOTE: %s: compiling from external source tree %s' % (d.getVar('PN', True), d.getVar('EXTERNALSRC', True)))
 }
+
+def srctree_hash_files(d):
+    import shutil
+    import subprocess
+    import tempfile
+
+    s_dir = d.getVar('EXTERNALSRC', True)
+    git_dir = os.path.join(s_dir, '.git')
+    oe_hash_file = os.path.join(git_dir, 'oe-devtool-tree-sha1')
+
+    ret = " "
+    if os.path.exists(git_dir):
+        with tempfile.NamedTemporaryFile(dir=git_dir, prefix='oe-devtool-index') as tmp_index:
+            # Clone index
+            shutil.copy2(os.path.join(git_dir, 'index'), tmp_index.name)
+            # Update our custom index
+            env = os.environ.copy()
+            env['GIT_INDEX_FILE'] = tmp_index.name
+            subprocess.check_output(['git', 'add', '.'], cwd=s_dir, env=env)
+            sha1 = subprocess.check_output(['git', 'write-tree'], cwd=s_dir, env=env)
+        with open(oe_hash_file, 'w') as fobj:
+            fobj.write(sha1)
+        ret = oe_hash_file + ':True'
+    else:
+        ret = d.getVar('EXTERNALSRC', True) + '/*:True'
+    return ret
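
The srctree_hash_files() helper added above is what lets do_compile re-run whenever the external source tree changes: for a git checkout it stages the whole tree into a throwaway index, writes the resulting tree hash to .git/oe-devtool-tree-sha1 and lists that file in do_compile's file-checksums, falling back to globbing the tree otherwise. A standalone sketch of the git part of that technique follows, assuming a git checkout and the git CLI on PATH; hash_git_worktree is an illustrative name, not part of the class.

    import os
    import shutil
    import subprocess
    import tempfile

    def hash_git_worktree(src_dir):
        """Hash the current content of a git work tree without touching its real index."""
        git_dir = os.path.join(src_dir, '.git')
        with tempfile.NamedTemporaryFile(dir=git_dir, prefix='tmp-index') as tmp_index:
            # Start from a copy of the real index so the scan is cheap
            shutil.copy2(os.path.join(git_dir, 'index'), tmp_index.name)
            env = dict(os.environ, GIT_INDEX_FILE=tmp_index.name)
            # Stage everything into the temporary index, then hash the tree
            subprocess.check_output(['git', 'add', '.'], cwd=src_dir, env=env)
            return subprocess.check_output(['git', 'write-tree'],
                                           cwd=src_dir, env=env).strip()

Because only the temporary index is updated, whatever the developer has staged in the external tree is left untouched.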
diff --git a/yocto-poky/meta/classes/extrausers.bbclass b/yocto-poky/meta/classes/extrausers.bbclass
index faf57b1..43900f3 100644
--- a/yocto-poky/meta/classes/extrausers.bbclass
+++ b/yocto-poky/meta/classes/extrausers.bbclass
@@ -33,22 +33,22 @@
 		# this setting is actually a serial process. So we only retry once.
 		case $cmd in
 			useradd)
-				perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
+				perform_useradd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
 				;;
 			groupadd)
-				perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
+				perform_groupadd "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
 				;;
 			userdel)
-				perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
+				perform_userdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
 				;;
 			groupdel)
-				perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
+				perform_groupdel "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
 				;;
 			usermod)
-				perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
+				perform_usermod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
 				;;
 			groupmod)
-				perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts" 1
+				perform_groupmod "${IMAGE_ROOTFS}" "-R ${IMAGE_ROOTFS} $opts"
 				;;
 			*)
 				bbfatal "Invalid command in EXTRA_USERS_PARAMS: $cmd"
diff --git a/yocto-poky/meta/classes/gio-module-cache.bbclass b/yocto-poky/meta/classes/gio-module-cache.bbclass
new file mode 100644
index 0000000..91461b1
--- /dev/null
+++ b/yocto-poky/meta/classes/gio-module-cache.bbclass
@@ -0,0 +1,37 @@
+DEPENDS += "qemu-native"
+inherit qemu
+
+GIO_MODULE_PACKAGES ??= "${PN}"
+
+gio_module_cache_common() {
+if [ "x$D" != "x" ]; then
+    $INTERCEPT_DIR/postinst_intercept update_gio_module_cache ${PKG} \
+            mlprefix=${MLPREFIX} \
+            binprefix=${MLPREFIX} \
+            libdir=${libdir} \
+            base_libdir=${base_libdir} \
+            bindir=${bindir}
+else
+    ${libexecdir}/${MLPREFIX}gio-querymodules ${libdir}/gio/modules/
+fi
+}
+
+python populate_packages_append () {
+    packages = d.getVar('GIO_MODULE_PACKAGES', True).split()
+
+    for pkg in packages:
+        bb.note("adding gio-module-cache postinst and postrm scripts to %s" % pkg)
+
+        postinst = d.getVar('pkg_postinst_%s' % pkg, True)
+        if not postinst:
+            postinst = '#!/bin/sh\n'
+        postinst += d.getVar('gio_module_cache_common', True)
+        d.setVar('pkg_postinst_%s' % pkg, postinst)
+
+        postrm = d.getVar('pkg_postrm_%s' % pkg, True)
+        if not postrm:
+            postrm = '#!/bin/sh\n'
+        postrm += d.getVar('gio_module_cache_common', True)
+        d.setVar('pkg_postrm_%s' % pkg, postrm)
+}
+
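
The populate_packages_append above follows the usual OE pattern for maintainer scripts: fetch any existing pkg_postinst/pkg_postrm, create a minimal shell header if there is none, and append the shared gio_module_cache_common fragment. A small sketch of that append-or-create pattern is below, with a plain dict standing in for the datastore; the package name and paths in the usage lines are made up.

    def add_script_fragment(scripts, key, fragment):
        """Append a shell fragment to an existing maintainer script, or create one."""
        body = scripts.get(key)
        if not body:
            body = '#!/bin/sh\n'
        body += fragment
        scripts[key] = body

    scripts = {}
    add_script_fragment(scripts, 'pkg_postinst_mypkg', 'gio-querymodules /usr/lib/gio/modules/\n')
    add_script_fragment(scripts, 'pkg_postinst_mypkg', 'echo "cache updated"\n')
    print(scripts['pkg_postinst_mypkg'])
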
diff --git a/yocto-poky/meta/classes/gnomebase.bbclass b/yocto-poky/meta/classes/gnomebase.bbclass
index d22ba5a..e5c6776 100644
--- a/yocto-poky/meta/classes/gnomebase.bbclass
+++ b/yocto-poky/meta/classes/gnomebase.bbclass
@@ -28,6 +28,3 @@
 	rm -f ${D}${datadir}/applications/*.cache
 }
 
-EXTRA_OECONF += "--disable-introspection"
-
-UNKNOWN_CONFIGURE_WHITELIST += "--disable-introspection"
diff --git a/yocto-poky/meta/classes/gobject-introspection-data.bbclass b/yocto-poky/meta/classes/gobject-introspection-data.bbclass
new file mode 100644
index 0000000..b1bdd26
--- /dev/null
+++ b/yocto-poky/meta/classes/gobject-introspection-data.bbclass
@@ -0,0 +1,9 @@
+# This variable is set to True if gobject-introspection-data is in
+# DISTRO_FEATURES and qemu-usermode is in MACHINE_FEATURES, and False otherwise.
+#
+# It should be used in recipes to determine whether introspection data should be built,
+# so that qemu use can be avoided when necessary.
+GI_DATA_ENABLED = "${@bb.utils.contains('DISTRO_FEATURES', 'gobject-introspection-data', \
+                      bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', 'True', 'False', d), 'False', d)}"
+
+
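
GI_DATA_ENABLED above is a nested bb.utils.contains(): the outer call checks DISTRO_FEATURES for gobject-introspection-data and, only when that passes, the inner call checks MACHINE_FEATURES for qemu-usermode. A plain-Python sketch of the same evaluation is below; it takes the feature strings directly rather than a datastore, and the feature values in the usage lines are made-up examples.

    def contains(value, item, truevalue, falsevalue):
        return truevalue if item in value.split() else falsevalue

    def gi_data_enabled(distro_features, machine_features):
        return contains(distro_features, 'gobject-introspection-data',
                        contains(machine_features, 'qemu-usermode', 'True', 'False'),
                        'False')

    print(gi_data_enabled('systemd gobject-introspection-data', 'ext2 qemu-usermode'))  # True
    print(gi_data_enabled('systemd gobject-introspection-data', 'ext2'))                # False
    print(gi_data_enabled('systemd', 'ext2 qemu-usermode'))                             # False
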
diff --git a/yocto-poky/meta/classes/gobject-introspection.bbclass b/yocto-poky/meta/classes/gobject-introspection.bbclass
new file mode 100644
index 0000000..2d73e40
--- /dev/null
+++ b/yocto-poky/meta/classes/gobject-introspection.bbclass
@@ -0,0 +1,37 @@
+# Inherit this class in recipes to enable building their introspection files
+
+# This sets up autoconf-based recipes to build introspection data (or not),
+# depending on distro and machine features (see gobject-introspection-data class).
+inherit gobject-introspection-data
+EXTRA_OECONF_prepend = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
+
+UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
+
+# Generating introspection data depends on a combination of native and target
+# introspection tools, and qemu to run the target tools.
+DEPENDS_append = " gobject-introspection gobject-introspection-native qemu-native"
+
+# This is necessary for python scripts to succeed - distutils fails if these
+# are not set
+export BUILD_SYS
+export HOST_SYS
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+# This is used by introspection tools to find .gir includes
+export XDG_DATA_DIRS = "${STAGING_DATADIR}"
+
+do_configure_prepend_class-target () {
+    # introspection.m4 pre-packaged with upstream tarballs does not yet
+    # have our fixes
+    mkdir -p ${S}/m4
+    cp ${STAGING_DIR_TARGET}/${datadir}/aclocal/introspection.m4 ${S}/m4
+}
+
+# .typelib files are needed at runtime and so they go to the main package (so
+# they'll be together with libraries they support).
+FILES_${PN}_append = " ${libdir}/girepository-*/*.typelib" 
+    
+# .gir files go to dev package, as they're needed for developing (but not for
+# running) things that depends on introspection.
+FILES_${PN}-dev_append = " ${datadir}/gir-*/*.gir"
diff --git a/yocto-poky/meta/classes/grub-efi.bbclass b/yocto-poky/meta/classes/grub-efi.bbclass
index 9a4220a..4ce3d28 100644
--- a/yocto-poky/meta/classes/grub-efi.bbclass
+++ b/yocto-poky/meta/classes/grub-efi.bbclass
@@ -14,17 +14,21 @@
 # ${APPEND} - an override list of append strings for each label
 # ${GRUB_OPTS} - additional options to add to the config, ';' delimited # (optional)
 # ${GRUB_TIMEOUT} - timeout before executing the default label (optional)
+# ${GRUB_ROOT} - grub's root device.
 
 do_bootimg[depends] += "${MLPREFIX}grub-efi:do_deploy"
 do_bootdirectdisk[depends] += "${MLPREFIX}grub-efi:do_deploy"
 
 GRUB_SERIAL ?= "console=ttyS0,115200"
-GRUBCFG = "${S}/grub.cfg"
+GRUB_CFG_VM = "${S}/grub_vm.cfg"
+GRUB_CFG_LIVE = "${S}/grub_live.cfg"
 GRUB_TIMEOUT ?= "10"
 #FIXME: build this from the machine config
 GRUB_OPTS ?= "serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"
 
 EFIDIR = "/EFI/BOOT"
+GRUB_ROOT ?= "${ROOT}"
+APPEND ?= ""
 
 # Need UUID utility code.
 inherit fs-uuid
@@ -42,7 +46,7 @@
 	fi
 	install -m 0644 ${DEPLOY_DIR_IMAGE}/${GRUB_IMAGE} ${DEST}${EFIDIR}
 
-	install -m 0644 ${GRUBCFG} ${DEST}${EFIDIR}/grub.cfg
+	install -m 0644 ${GRUB_CFG} ${DEST}${EFIDIR}/grub.cfg
 }
 
 efi_iso_populate() {
@@ -82,9 +86,9 @@
         bb.debug(1, "No labels, nothing to do")
         return
 
-    cfile = d.getVar('GRUBCFG', True)
+    cfile = d.getVar('GRUB_CFG', True)
     if not cfile:
-        raise bb.build.FuncFailed('Unable to read GRUBCFG')
+        raise bb.build.FuncFailed('Unable to read GRUB_CFG')
 
     try:
          cfgfile = file(cfile, 'w')
@@ -106,6 +110,10 @@
     else:
         cfgfile.write('timeout=50\n')
 
+    root = d.getVar('GRUB_ROOT', True)
+    if not root:
+        raise bb.build.FuncFailed('GRUB_ROOT not defined')
+
     if gfxserial == "1":
         btypes = [ [ " graphics console", "" ],
             [ " serial console", d.getVar('GRUB_SERIAL', True) or "" ] ]
@@ -129,6 +137,8 @@
                 lb = "install-efi"
             cfgfile.write('linux /vmlinuz LABEL=%s' % (lb))
 
+            cfgfile.write(' %s' % replace_rootfs_uuid(d, root))
+
             append = localdata.getVar('APPEND', True)
             initrd = localdata.getVar('INITRD', True)
 
diff --git a/yocto-poky/meta/classes/gtk-immodules-cache.bbclass b/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
index 0a6316d..c099cd3 100644
--- a/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
+++ b/yocto-poky/meta/classes/gtk-immodules-cache.bbclass
@@ -10,47 +10,51 @@
 
 gtk_immodule_cache_postinst() {
 if [ "x$D" != "x" ]; then
-    for maj_ver in 2 3; do
-        if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
-            IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
-            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
-                $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
-                sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
-
-            [ $? -ne 0 ] && exit 1
+        if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
+            IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
+            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
+                $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
+                sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
+        elif [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
+            IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
+            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
+                $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
+                sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
         fi
-    done
 
+    [ $? -ne 0 ] && exit 1
     exit 0
 fi
 if [ ! -z `which gtk-query-immodules-2.0` ]; then
-    gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
+    gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
 fi
 if [ ! -z `which gtk-query-immodules-3.0` ]; then
-    gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
+    gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
 fi
 }
 
 gtk_immodule_cache_postrm() {
 if [ "x$D" != "x" ]; then
-    for maj_ver in 2 3; do
-        if [ -x $D${bindir}/gtk-query-immodules-$maj_ver.0 ]; then
-            IMFILES=$(ls $D${libdir}/gtk-$maj_ver.0/*/immodules/*.so)
-            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-$maj_ver.0')} \
-                $IMFILES > $D/etc/gtk-$maj_ver.0/gtk.immodules 2>/dev/null &&
-                sed -i -e "s:$D::" $D/etc/gtk-$maj_ver.0/gtk.immodules
-
-            [ $? -ne 0 ] && exit 1
+        if [ -x $D${bindir}/gtk-query-immodules-2.0 ]; then
+            IMFILES=$(ls $D${libdir}/gtk-2.0/*/immodules/*.so)
+            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-2.0')} \
+                $IMFILES > $D${libdir}/gtk-2.0/2.10.0/immodules.cache 2>/dev/null &&
+                sed -i -e "s:$D::" $D${libdir}/gtk-2.0/2.10.0/immodules.cache
+        elif [ -x $D${bindir}/gtk-query-immodules-3.0 ]; then
+            IMFILES=$(ls $D${libdir}/gtk-3.0/*/immodules/*.so)
+            ${@qemu_run_binary(d, '$D', '${bindir}/gtk-query-immodules-3.0')} \
+                $IMFILES > $D${libdir}/gtk-3.0/3.0.0/immodules.cache 2>/dev/null &&
+                sed -i -e "s:$D::" $D${libdir}/gtk-3.0/3.0.0/immodules.cache
         fi
-    done
 
+    [ $? -ne 0 ] && exit 1
     exit 0
 fi
 if [ ! -z `which gtk-query-immodules-2.0` ]; then
-    gtk-query-immodules-2.0 > /etc/gtk-2.0/gtk.immodules
+    gtk-query-immodules-2.0 > ${libdir}/gtk-2.0/2.10.0/immodules.cache
 fi
 if [ ! -z `which gtk-query-immodules-3.0` ]; then
-    gtk-query-immodules-3.0 > /etc/gtk-3.0/gtk.immodules
+    gtk-query-immodules-3.0 > ${libdir}/gtk-3.0/3.0.0/immodules.cache
 fi
 }
 
diff --git a/yocto-poky/meta/classes/gummiboot.bbclass b/yocto-poky/meta/classes/gummiboot.bbclass
index 9a97ac1..1ebb946 100644
--- a/yocto-poky/meta/classes/gummiboot.bbclass
+++ b/yocto-poky/meta/classes/gummiboot.bbclass
@@ -4,7 +4,7 @@
 
 # gummiboot.bbclass - equivalent of grub-efi.bbclass
 # Set EFI_PROVIDER = "gummiboot" to use gummiboot on your live images instead of grub-efi
-# (images built by bootimage.bbclass or boot-directdisk.bbclass)
+# (images built by image-live.bbclass or image-vm.bbclass)
 
 do_bootimg[depends] += "${MLPREFIX}gummiboot:do_deploy"
 do_bootdirectdisk[depends] += "${MLPREFIX}gummiboot:do_deploy"
diff --git a/yocto-poky/meta/classes/icecc.bbclass b/yocto-poky/meta/classes/icecc.bbclass
index 61b8bb1..e1c06c4 100644
--- a/yocto-poky/meta/classes/icecc.bbclass
+++ b/yocto-poky/meta/classes/icecc.bbclass
@@ -63,7 +63,7 @@
     Create Symlinks for the icecc in the staging directory
     """
     staging = os.path.join(d.expand('${STAGING_BINDIR}'), "ice")
-    if icc_is_kernel(bb, d):
+    if icecc_is_kernel(bb, d):
         staging += "-kernel"
 
     #check if the icecc path is set by the user
@@ -90,13 +90,13 @@
 
     return staging
 
-def use_icc(bb,d):
+def use_icecc(bb,d):
     if d.getVar('ICECC_DISABLED', False) == "1":
         # don't even try it, when explicitly disabled
         return "no"
 
     # allarch recipes don't use compiler
-    if icc_is_allarch(bb, d):
+    if icecc_is_allarch(bb, d):
         return "no"
 
     pn = d.getVar('PN', True)
@@ -137,29 +137,29 @@
 
     return "yes"
 
-def icc_is_allarch(bb, d):
-    return d.getVar("PACKAGE_ARCH", False) == "all"
+def icecc_is_allarch(bb, d):
+    return d.getVar("PACKAGE_ARCH", True) == "all" or bb.data.inherits_class('allarch', d)
 
-def icc_is_kernel(bb, d):
+def icecc_is_kernel(bb, d):
     return \
         bb.data.inherits_class("kernel", d);
 
-def icc_is_native(bb, d):
+def icecc_is_native(bb, d):
     return \
         bb.data.inherits_class("cross", d) or \
         bb.data.inherits_class("native", d);
 
 # Don't pollute allarch signatures with TARGET_FPU
-icc_version[vardepsexclude] += "TARGET_FPU"
-def icc_version(bb, d):
-    if use_icc(bb, d) == "no":
+icecc_version[vardepsexclude] += "TARGET_FPU"
+def icecc_version(bb, d):
+    if use_icecc(bb, d) == "no":
         return ""
 
     parallel = d.getVar('ICECC_PARALLEL_MAKE', False) or ""
     if not d.getVar('PARALLEL_MAKE', False) == "" and parallel:
         d.setVar("PARALLEL_MAKE", parallel)
 
-    if icc_is_native(bb, d):
+    if icecc_is_native(bb, d):
         archive_name = "local-host-env"
     elif d.expand('${HOST_PREFIX}') == "":
         bb.fatal(d.expand("${PN}"), " NULL prefix")
@@ -169,7 +169,7 @@
         target_sys = d.expand('${TARGET_SYS}')
         float = d.getVar('TARGET_FPU', False) or "hard"
         archive_name = prefix + distro + "-"        + target_sys + "-" + float
-        if icc_is_kernel(bb, d):
+        if icecc_is_kernel(bb, d):
             archive_name += "-kernel"
 
     import socket
@@ -178,29 +178,29 @@
 
     return tar_file
 
-def icc_path(bb,d):
-    if use_icc(bb, d) == "no":
+def icecc_path(bb,d):
+    if use_icecc(bb, d) == "no":
         # don't create unnecessary directories when icecc is disabled
         return
 
-    if icc_is_kernel(bb, d):
+    if icecc_is_kernel(bb, d):
         return create_path( [get_cross_kernel_cc(bb,d), ], bb, d)
 
     else:
         prefix = d.expand('${HOST_PREFIX}')
         return create_path( [prefix+"gcc", prefix+"g++"], bb, d)
 
-def icc_get_external_tool(bb, d, tool):
+def icecc_get_external_tool(bb, d, tool):
     external_toolchain_bindir = d.expand('${EXTERNAL_TOOLCHAIN}${bindir_cross}')
     target_prefix = d.expand('${TARGET_PREFIX}')
     return os.path.join(external_toolchain_bindir, '%s%s' % (target_prefix, tool))
 
 # Don't pollute native signatures with target TUNE_PKGARCH through STAGING_BINDIR_TOOLCHAIN
-icc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
-def icc_get_tool(bb, d, tool):
-    if icc_is_native(bb, d):
+icecc_get_tool[vardepsexclude] += "STAGING_BINDIR_TOOLCHAIN"
+def icecc_get_tool(bb, d, tool):
+    if icecc_is_native(bb, d):
         return bb.utils.which(os.getenv("PATH"), tool)
-    elif icc_is_kernel(bb, d):
+    elif icecc_is_kernel(bb, d):
         return bb.utils.which(os.getenv("PATH"), get_cross_kernel_cc(bb, d))
     else:
         ice_dir = d.expand('${STAGING_BINDIR_TOOLCHAIN}')
@@ -209,17 +209,17 @@
         if os.path.isfile(tool_bin):
             return tool_bin
         else:
-            external_tool_bin = icc_get_external_tool(bb, d, tool)
+            external_tool_bin = icecc_get_external_tool(bb, d, tool)
             if os.path.isfile(external_tool_bin):
                 return external_tool_bin
             else:
                 return ""
 
-def icc_get_and_check_tool(bb, d, tool):
+def icecc_get_and_check_tool(bb, d, tool):
     # Check that g++ or gcc is not a symbolic link to icecc binary in
     # PATH or icecc-create-env script will silently create an invalid
     # compiler environment package.
-    t = icc_get_tool(bb, d, tool)
+    t = icecc_get_tool(bb, d, tool)
     if t and os.popen("readlink -f %s" % t).read()[:-1] == get_icecc(d):
         bb.error("%s is a symlink to %s in PATH and this prevents icecc from working" % (t, get_icecc(d)))
         return ""
@@ -246,27 +246,27 @@
     return
 
 set_icecc_env() {
-    if [ "${@use_icc(bb, d)}" = "no" ]
+    if [ "${@use_icecc(bb, d)}" = "no" ]
     then
         return
     fi
-    ICECC_VERSION="${@icc_version(bb, d)}"
+    ICECC_VERSION="${@icecc_version(bb, d)}"
     if [ "x${ICECC_VERSION}" = "x" ]
     then
         bbwarn "Cannot use icecc: could not get ICECC_VERSION"
         return
     fi
 
-    ICE_PATH="${@icc_path(bb, d)}"
+    ICE_PATH="${@icecc_path(bb, d)}"
     if [ "x${ICE_PATH}" = "x" ]
     then
         bbwarn "Cannot use icecc: could not get ICE_PATH"
         return
     fi
 
-    ICECC_CC="${@icc_get_and_check_tool(bb, d, "gcc")}"
-    ICECC_CXX="${@icc_get_and_check_tool(bb, d, "g++")}"
-    # cannot use icc_get_and_check_tool here because it assumes as without target_sys prefix
+    ICECC_CC="${@icecc_get_and_check_tool(bb, d, "gcc")}"
+    ICECC_CXX="${@icecc_get_and_check_tool(bb, d, "g++")}"
+    # cannot use icecc_get_and_check_tool here because it assumes 'as' without the target_sys prefix
     ICECC_WHICH_AS="${@bb.utils.which(os.getenv('PATH'), 'as')}"
     if [ ! -x "${ICECC_CC}" -o ! -x "${ICECC_CXX}" ]
     then
diff --git a/yocto-poky/meta/classes/image-buildinfo.bbclass b/yocto-poky/meta/classes/image-buildinfo.bbclass
index aa17cc8..197b242 100644
--- a/yocto-poky/meta/classes/image-buildinfo.bbclass
+++ b/yocto-poky/meta/classes/image-buildinfo.bbclass
@@ -19,19 +19,24 @@
     ret = ""
     for var in vars:
         value = d.getVar(var, True) or ""
-        if (d.getVarFlag(var, 'type') == "list"):
+        if (d.getVarFlag(var, 'type', True) == "list"):
             value = oe.utils.squashspaces(value)
         ret += "%s = %s\n" % (var, value)
     return ret.rstrip('\n')
 
 # Gets git branch's status (clean or dirty)
 def get_layer_git_status(path):
-    f = os.popen("cd %s; git diff --stat 2>&1 | tail -n 1" % path)
-    data = f.read()
-    if f.close() is None:
-        if len(data) != 0:
-            return "-- modified"
-    return ""
+    import subprocess
+    try:
+        subprocess.check_output("cd %s; PSEUDO_UNLOAD=1 git diff --quiet --no-ext-diff" % path,
+                                shell=True,
+                                stderr=subprocess.STDOUT)
+        return ""
+    except subprocess.CalledProcessError, ex:
+        # Silently treat errors as "modified", without checking for the
+        # (expected) return code 1 in a modified git repo. For example, we get
+        # output and a 129 return code when a layer isn't a git repo at all.
+        return "-- modified"
 
 # Returns layer revisions along with their respective status
 def get_layer_revs(d):
@@ -53,17 +58,21 @@
         return image_buildinfo_outputvars(vars, listvars, d)
 
 # Write build information to target filesystem
-buildinfo () {
-cat > ${IMAGE_ROOTFS}${sysconfdir}/build << END
------------------------
+python buildinfo () {
+    with open(d.expand('${IMAGE_ROOTFS}${sysconfdir}/build'), 'w') as build:
+        build.writelines((
+            '''-----------------------
 Build Configuration:  |
 -----------------------
-${@buildinfo_target(d)}
+''',
+            buildinfo_target(d),
+            '''
 -----------------------
-Layer Revisions:      |   
+Layer Revisions:      |
 -----------------------
-${@get_layer_revs(d)}
-END
+''',
+            get_layer_revs(d)
+       ))
 }
 
 IMAGE_PREPROCESS_COMMAND += "buildinfo;"
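
The rewritten get_layer_git_status() relies on the exit status of 'git diff --quiet': 0 means the layer checkout is clean, while any non-zero status (1 for a dirty tree, 129 when the path is not a git repository at all) is reported as modified, and the command runs with PSEUDO_UNLOAD=1 so pseudo does not interpose on git. A standalone sketch using the modern except-as syntax (the class above uses the Python 2 form bitbake required at the time) follows.

    import subprocess

    def layer_git_status(path):
        """Return '-- modified' if the layer checkout has local changes, else ''."""
        try:
            subprocess.check_output(
                'cd %s; PSEUDO_UNLOAD=1 git diff --quiet --no-ext-diff' % path,
                shell=True, stderr=subprocess.STDOUT)
            return ''
        except subprocess.CalledProcessError:
            # 1 (dirty tree) and e.g. 129 (not a git repo) are both treated as modified
            return '-- modified'
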
diff --git a/yocto-poky/meta/classes/image-live.bbclass b/yocto-poky/meta/classes/image-live.bbclass
index 23e4a5c..c8a8610 100644
--- a/yocto-poky/meta/classes/image-live.bbclass
+++ b/yocto-poky/meta/classes/image-live.bbclass
@@ -1,18 +1,284 @@
+# Copyright (C) 2004, Advanced Micro Devices, Inc.  All Rights Reserved
+# Released under the MIT license (see packages/COPYING)
 
-AUTO_SYSLINUXCFG = "1"
-INITRD_IMAGE ?= "core-image-minimal-initramfs"
-INITRD ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz"
-SYSLINUX_ROOT ?= "root=/dev/ram0"
-SYSLINUX_TIMEOUT ?= "50"
-SYSLINUX_LABELS ?= "boot install"
-LABELS_append = " ${SYSLINUX_LABELS} "
+# Creates a bootable image using syslinux, your kernel and an optional
+# initrd
 
-ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext4"
+#
+# End result is two things:
+#
+# 1. A .hddimg file which is an msdos filesystem containing syslinux, a kernel,
+# an initrd and a rootfs image. These can be written to harddisks directly and
+# also booted on USB flash disks (write them there with dd).
+#
+# 2. A CD .iso image
 
-do_bootimg[depends] += "${INITRD_IMAGE}:do_rootfs"
-do_bootimg[depends] += "${PN}:do_rootfs"
+# Boot process is that the initrd will boot and process which label was selected
+# in syslinux. Actions based on the label are then performed (e.g. installing to
+# an hdd)
 
-inherit bootimg
+# External variables (also used by syslinux.bbclass)
+# ${INITRD} - indicates a list of filesystem images to concatenate and use as an initrd (optional)
+# ${COMPRESSISO} - Transparent compress ISO, reduce size ~40% if set to 1
+# ${NOISO}  - skip building the ISO image if set to 1
+# ${NOHDD}  - skip building the HDD image if set to 1
+# ${HDDIMG_ID} - FAT image volume-id
+# ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional)
+
+inherit live-vm-common
+
+do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \
+                        mtools-native:do_populate_sysroot \
+                        cdrtools-native:do_populate_sysroot \
+                        virtual/kernel:do_deploy \
+                        ${MLPREFIX}syslinux:do_populate_sysroot \
+                        syslinux-native:do_populate_sysroot \
+                        ${@oe.utils.ifelse(d.getVar('COMPRESSISO', False),'zisofs-tools-native:do_populate_sysroot','')} \
+                        ${PN}:do_image_ext4 \
+                        "
+
+
+LABELS_LIVE ?= "boot install"
+ROOT_LIVE ?= "root=/dev/ram0"
+INITRD_IMAGE_LIVE ?= "core-image-minimal-initramfs"
+INITRD_LIVE ?= "${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_LIVE}-${MACHINE}.cpio.gz"
+
+ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.ext4"
 
 IMAGE_TYPEDEP_live = "ext4"
-IMAGE_TYPES_MASKED += "live"
+IMAGE_TYPEDEP_iso = "ext4"
+IMAGE_TYPEDEP_hddimg = "ext4"
+IMAGE_TYPES_MASKED += "live hddimg iso"
+
+python() {
+    image_b = d.getVar('IMAGE_BASENAME', True)
+    initrd_i = d.getVar('INITRD_IMAGE_LIVE', True)
+    if image_b == initrd_i:
+        bb.error('INITRD_IMAGE_LIVE %s cannot use image live, hddimg or iso.' % initrd_i)
+        bb.fatal('Check IMAGE_FSTYPES and INITRAMFS_FSTYPES settings.')
+    else:
+        d.appendVarFlag('do_bootimg', 'depends', ' %s:do_image_complete' % initrd_i)
+}
+
+HDDDIR = "${S}/hddimg"
+ISODIR = "${S}/iso"
+EFIIMGDIR = "${S}/efi_img"
+COMPACT_ISODIR = "${S}/iso.z"
+COMPRESSISO ?= "0"
+
+ISOLINUXDIR ?= "/isolinux"
+ISO_BOOTIMG = "isolinux/isolinux.bin"
+ISO_BOOTCAT = "isolinux/boot.cat"
+MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
+
+BOOTIMG_VOLUME_ID   ?= "boot"
+BOOTIMG_EXTRA_SPACE ?= "512"
+
+populate_live() {
+    populate_kernel $1
+	if [ -s "${ROOTFS}" ]; then
+		install -m 0644 ${ROOTFS} $1/rootfs.img
+	fi
+}
+
+build_iso() {
+	# Only create an ISO if we have an INITRD and NOISO was not set
+	if [ -z "${INITRD}" ] || [ "${NOISO}" = "1" ]; then
+		bbnote "ISO image will not be created."
+		return
+	fi
+	# ${INITRD} is a list of multiple filesystem images
+	for fs in ${INITRD}
+	do
+		if [ ! -s "$fs" ]; then
+			bbnote "ISO image will not be created. $fs is invalid."
+			return
+		fi
+	done
+
+	populate_live ${ISODIR}
+
+	if [ "${PCBIOS}" = "1" ]; then
+		syslinux_iso_populate ${ISODIR}
+	fi
+	if [ "${EFI}" = "1" ]; then
+		efi_iso_populate ${ISODIR}
+		build_fat_img ${EFIIMGDIR} ${ISODIR}/efi.img
+	fi
+
+	# EFI only
+	if [ "${PCBIOS}" != "1" ] && [ "${EFI}" = "1" ] ; then
+		# Work around a bug in isohybrid where it requires isolinux.bin
+		# in the boot catalog, even though it is not used
+		mkdir -p ${ISODIR}/${ISOLINUXDIR}
+		install -m 0644 ${STAGING_DATADIR}/syslinux/isolinux.bin ${ISODIR}${ISOLINUXDIR}
+	fi
+
+	if [ "${COMPRESSISO}" = "1" ] ; then
+		# create compact directory, compress iso
+		mkdir -p ${COMPACT_ISODIR}
+		mkzftree -z 9 -p 4 -F ${ISODIR}/rootfs.img ${COMPACT_ISODIR}/rootfs.img
+
+		# move compact iso to iso, then remove compact directory
+		mv ${COMPACT_ISODIR}/rootfs.img ${ISODIR}/rootfs.img
+		rm -Rf ${COMPACT_ISODIR}
+		mkisofs_compress_opts="-R -z -D -l"
+	else
+		mkisofs_compress_opts="-r"
+	fi
+
+	# Check the size of ${ISODIR}/rootfs.img, use mkisofs -iso-level 3
+	# when it exceeds 3.8GB; the specification is 4G - 1 bytes, so we need to
+	# leave some space for other files.
+	mkisofs_iso_level=""
+
+        if [ -n "${ROOTFS}" ] && [ -s "${ROOTFS}" ]; then
+		rootfs_img_size=`stat -c '%s' ${ISODIR}/rootfs.img`
+		# 4080218931 = 3.8 * 1024 * 1024 * 1024
+		if [ $rootfs_img_size -gt 4080218931 ]; then
+			bbnote "${ISODIR}/rootfs.img exceeds 3.8GB, using '-iso-level 3' for mkisofs"
+			mkisofs_iso_level="-iso-level 3"
+		fi
+	fi
+
+	if [ "${PCBIOS}" = "1" ] && [ "${EFI}" != "1" ] ; then
+		# PCBIOS only media
+		mkisofs -V ${BOOTIMG_VOLUME_ID} \
+		        -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+			-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
+			$mkisofs_compress_opts \
+			${MKISOFS_OPTIONS} $mkisofs_iso_level ${ISODIR}
+	else
+		# EFI only OR EFI+PCBIOS
+		mkisofs -A ${BOOTIMG_VOLUME_ID} -V ${BOOTIMG_VOLUME_ID} \
+		        -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso \
+			-b ${ISO_BOOTIMG} -c ${ISO_BOOTCAT} \
+			$mkisofs_compress_opts ${MKISOFS_OPTIONS} $mkisofs_iso_level \
+			-eltorito-alt-boot -eltorito-platform efi \
+			-b efi.img -no-emul-boot \
+			${ISODIR}
+		isohybrid_args="-u"
+	fi
+
+	isohybrid $isohybrid_args ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.iso
+}
+
+build_fat_img() {
+	FATSOURCEDIR=$1
+	FATIMG=$2
+
+	# Calculate the size required for the final image including the
+	# data and filesystem overhead.
+	# Sectors: 512 bytes
+	#  Blocks: 1024 bytes
+
+	# Determine the sector count just for the data
+	SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
+
+	# Account for the filesystem overhead. This includes directory
+	# entries in the clusters as well as the FAT itself.
+	# Assumptions:
+	#   FAT32 (12 or 16 may be selected by mkdosfs, but the extra
+	#   padding will be minimal on those smaller images and not
+	#   worth the logic here to calculate the smaller FAT sizes)
+	#   < 16 entries per directory
+	#   8.3 filenames only
+
+	# 32 bytes per dir entry
+	DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
+	# 32 bytes for every end-of-directory dir entry
+	DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
+	# 4 bytes per FAT entry per sector of data
+	FAT_BYTES=$(expr $SECTORS \* 4)
+	# 4 bytes per FAT entry per end-of-cluster list
+	FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
+
+	# Use a ceiling function to determine FS overhead in sectors
+	DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
+	# There are two FATs on the image
+	FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
+	SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
+
+	# Determine the final size in blocks accounting for some padding
+	BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
+
+	# Ensure total sectors is an integral number of sectors per
+	# track or mcopy will complain. Sectors are 512 bytes, and we
+	# generate images with 32 sectors per track. This calculation is
+	# done in blocks, thus the mod by 16 instead of 32.
+	BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
+
+	# mkdosfs will sometimes use FAT16 when it is not appropriate,
+	# resulting in a boot failure from SYSLINUX. Use FAT32 for
+	# images larger than 512MB, otherwise let mkdosfs decide.
+	if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
+		FATSIZE="-F 32"
+	fi
+
+	# mkdosfs will fail if ${FATIMG} exists. Since we are creating a
+	# new image, it is safe to delete any previous image.
+	if [ -e ${FATIMG} ]; then
+		rm ${FATIMG}
+	fi
+
+	if [ -z "${HDDIMG_ID}" ]; then
+		mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+			${BLOCKS}
+	else
+		mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} \
+		${BLOCKS} -i ${HDDIMG_ID}
+	fi
+
+	# Copy FATSOURCEDIR recursively into the image file directly
+	mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
+}
+
+build_hddimg() {
+	# Create an HDD image
+	if [ "${NOHDD}" != "1" ] ; then
+		populate_live ${HDDDIR}
+
+		if [ "${PCBIOS}" = "1" ]; then
+			syslinux_hddimg_populate ${HDDDIR}
+		fi
+		if [ "${EFI}" = "1" ]; then
+			efi_hddimg_populate ${HDDDIR}
+		fi
+
+		# Check the size of ${HDDDIR}/rootfs.img, error out if it
+		# exceeds 4GB, it is the single file's max size of FAT fs.
+		if [ -f ${HDDDIR}/rootfs.img ]; then
+			rootfs_img_size=`stat -c '%s' ${HDDDIR}/rootfs.img`
+			max_size=`expr 4 \* 1024 \* 1024 \* 1024`
+			if [ $rootfs_img_size -gt $max_size ]; then
+				bberror "${HDDDIR}/rootfs.img exceeds 4GB,"
+				bberror "this doesn't work on a FAT filesystem, you can try either of:"
+				bberror "1) Reduce the size of rootfs.img"
+				bbfatal "2) Use iso, vmdk or vdi instead of hddimg\n"
+			fi
+		fi
+
+		build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+
+		if [ "${PCBIOS}" = "1" ]; then
+			syslinux_hddimg_install
+		fi
+
+		chmod 644 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
+	fi
+}
+
+python do_bootimg() {
+    set_live_vm_vars(d, 'LIVE')
+    if d.getVar("PCBIOS", True) == "1":
+        bb.build.exec_func('build_syslinux_cfg', d)
+    if d.getVar("EFI", True) == "1":
+        bb.build.exec_func('build_efi_cfg', d)
+    bb.build.exec_func('build_hddimg', d)
+    bb.build.exec_func('build_iso', d)
+    bb.build.exec_func('create_symlinks', d)
+}
+do_bootimg[subimages] = "hddimg iso"
+do_bootimg[imgsuffix] = "."
+
+addtask bootimg before do_image_complete
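
Most of build_fat_img() above is sizing arithmetic: payload sectors from du, 32 bytes per directory entry plus one end-of-directory entry per directory, 4 bytes of FAT per data sector plus one end-of-cluster entry per directory, the FAT overhead doubled because the image carries two FAT copies, then BOOTIMG_EXTRA_SPACE padding and rounding to whole 32-sector tracks. The Python transcription below mirrors the shell arithmetic so it can be checked in isolation; the numbers in the usage line are made up.

    def fat_image_blocks(data_kbytes, n_entries, n_dirs, extra_space_blocks=512):
        sectors = data_kbytes * 2                      # 512-byte sectors of payload
        dir_bytes = n_entries * 32                     # 32 bytes per directory entry
        dir_bytes += n_dirs * 32                       # plus an end-of-directory entry each
        fat_bytes = sectors * 4                        # 4 bytes of FAT per data sector
        fat_bytes += n_dirs * 4                        # plus an end-of-cluster entry each
        dir_sectors = (dir_bytes + 511) // 512         # round overhead up to whole sectors
        fat_sectors = ((fat_bytes + 511) // 512) * 2   # two copies of the FAT
        sectors += dir_sectors + fat_sectors
        blocks = sectors // 2 + extra_space_blocks     # 1024-byte blocks plus padding
        blocks += 16 - blocks % 16                     # whole 32-sector tracks (16 blocks)
        return blocks

    # e.g. roughly 40MB of payload, 25 directory entries, 3 directories
    print(fat_image_blocks(data_kbytes=40000, n_entries=25, n_dirs=3))
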
diff --git a/yocto-poky/meta/classes/image-mklibs.bbclass b/yocto-poky/meta/classes/image-mklibs.bbclass
index cfb3ffc..5f6df1b 100644
--- a/yocto-poky/meta/classes/image-mklibs.bbclass
+++ b/yocto-poky/meta/classes/image-mklibs.bbclass
@@ -2,39 +2,24 @@
 
 IMAGE_PREPROCESS_COMMAND += "mklibs_optimize_image; "
 
+inherit linuxloader
+
 mklibs_optimize_image_doit() {
 	rm -rf ${WORKDIR}/mklibs
 	mkdir -p ${WORKDIR}/mklibs/dest
 	cd ${IMAGE_ROOTFS}
 	du -bs > ${WORKDIR}/mklibs/du.before.mklibs.txt
-	for i in `find .`; do file $i; done \
-		| grep ELF \
-		| grep "LSB *executable" \
-		| grep "dynamically linked" \
-		| sed "s/:.*//" \
-		| sed "s+^\./++" \
-		> ${WORKDIR}/mklibs/executables.list
 
-	case ${TARGET_ARCH} in
-		powerpc | mips | mipsel | microblaze )
-			dynamic_loader="${base_libdir}/ld.so.1"
-			;;
-		powerpc64)
-			dynamic_loader="${base_libdir}/ld64.so.1"
-			;;
-		x86_64)
-			dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
-			;;
-		i*86 )
-			dynamic_loader="${base_libdir}/ld-linux.so.2"
-			;;
-		arm )
-			dynamic_loader="${base_libdir}/ld-linux.so.3"
-			;;
-		* )
-			dynamic_loader="/unknown_dynamic_linker"
-			;;
-	esac
+	# Build a list of dynamically linked executable ELF files.
+	# Omit libc/libpthread as a special case because they have an interpreter
+	# but are primarily what we intend to strip down.
+	for i in `find . -type f -executable ! -name 'libc-*' ! -name 'libpthread-*'`; do
+		file $i | grep -q ELF || continue
+		${HOST_PREFIX}readelf -l $i | grep -q INTERP || continue
+		echo $i
+	done > ${WORKDIR}/mklibs/executables.list
+
+	dynamic_loader=$(linuxloader)
 
 	mklibs -v \
 		--ldlib ${dynamic_loader} \
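
The rewritten scan above keeps any executable ELF file whose program headers contain an INTERP entry (checked with readelf -l), i.e. anything that actually uses a dynamic loader, instead of pattern-matching file(1) output for dynamically linked LSB executables, and it skips libc/libpthread by name. A rough Python equivalent of the per-file test is below; it assumes a readelf binary on PATH, whereas the class uses the cross ${HOST_PREFIX}readelf.

    import subprocess

    def is_dynamic_executable(path, readelf='readelf'):
        """True if path is an ELF file with a program interpreter (PT_INTERP)."""
        with open(path, 'rb') as f:
            if f.read(4) != b'\x7fELF':
                return False
        headers = subprocess.run([readelf, '-l', path],
                                 capture_output=True, text=True).stdout
        return 'INTERP' in headers
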
diff --git a/yocto-poky/meta/classes/image-prelink.bbclass b/yocto-poky/meta/classes/image-prelink.bbclass
index d4bb3ae..4157df0 100644
--- a/yocto-poky/meta/classes/image-prelink.bbclass
+++ b/yocto-poky/meta/classes/image-prelink.bbclass
@@ -1,6 +1,12 @@
 do_rootfs[depends] += "prelink-native:do_populate_sysroot"
 
-IMAGE_PREPROCESS_COMMAND += "prelink_image; "
+IMAGE_PREPROCESS_COMMAND += "prelink_setup; prelink_image; "
+
+python prelink_setup () {
+    oe.utils.write_ld_so_conf(d)
+}
+
+inherit linuxloader
 
 prelink_image () {
 #	export PSEUDO_DEBUG=4
@@ -13,21 +19,36 @@
 
 	# We need a prelink conf on the filesystem, add one if it's missing
 	if [ ! -e ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf ]; then
-		cp ${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf \
+		cp ${STAGING_ETCDIR_NATIVE}/prelink.conf \
 			${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
 		dummy_prelink_conf=true;
 	else
 		dummy_prelink_conf=false;
 	fi
 
+	# We need an ld.so.conf with library pathnames in it on the filesystem, add one if it's missing
+	ldsoconf=${IMAGE_ROOTFS}${sysconfdir}/ld.so.conf
+	if [ -e $ldsoconf ]; then
+		cp $ldsoconf $ldsoconf.prelink
+	fi
+	cat ${STAGING_DIR_TARGET}${sysconfdir}/ld.so.conf >> $ldsoconf
+
+	dynamic_loader=$(linuxloader)
+
 	# prelink!
-	${STAGING_DIR_NATIVE}${sbindir_native}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf
+	${STAGING_SBINDIR_NATIVE}/prelink --root ${IMAGE_ROOTFS} -amR -N -c ${sysconfdir}/prelink.conf --dynamic-linker $dynamic_loader
 
 	# Remove the prelink.conf if we had to add it.
 	if [ "$dummy_prelink_conf" = "true" ]; then
 		rm -f ${IMAGE_ROOTFS}${sysconfdir}/prelink.conf
 	fi
 
+	if [ -e $ldsoconf.prelink ]; then
+		mv $ldsoconf.prelink $ldsoconf
+	else
+		rm $ldsoconf
+	fi
+
 	pre_prelink_size=`du -ks ${IMAGE_ROOTFS} | awk '{size = $1 ; print size }'`
 	echo "Size after prelinking $pre_prelink_size."
 }
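
Besides passing the loader returned by linuxloader() via --dynamic-linker, the change above needs library search paths inside the image, so it temporarily appends the staging ld.so.conf to the image's copy (backing up any existing file) and restores or removes it once prelink has run. A generic Python sketch of that back-up/augment/restore pattern follows; the function and file names are illustrative, not part of the class.

    import os
    import shutil

    def with_augmented_config(conf, extra_conf, action):
        """Append extra_conf to conf, run action(), then put conf back as it was."""
        backup = conf + '.bak'
        existed = os.path.exists(conf)
        if existed:
            shutil.copy2(conf, backup)
        with open(conf, 'a') as dst, open(extra_conf) as src:
            dst.write(src.read())
        try:
            action()
        finally:
            if existed:
                os.replace(backup, conf)
            else:
                os.remove(conf)
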
diff --git a/yocto-poky/meta/classes/image-swab.bbclass b/yocto-poky/meta/classes/image-swab.bbclass
index 8931856..6b02cad 100644
--- a/yocto-poky/meta/classes/image-swab.bbclass
+++ b/yocto-poky/meta/classes/image-swab.bbclass
@@ -47,7 +47,7 @@
     # and cross packages which aren't swabber-native or one of its dependencies
     # I have ignored them for now...
     if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('nativesdk', d) and not bb.data.inherits_class('cross', d):
-        deps = (d.getVarFlag('do_setscene', 'depends') or "").split()
+        deps = (d.getVarFlag('do_setscene', 'depends', True) or "").split()
         deps.append('strace-native:do_populate_sysroot')
         d.setVarFlag('do_setscene', 'depends', " ".join(deps))
         logdir = d.expand("${TRACE_LOGDIR}")
diff --git a/yocto-poky/meta/classes/image-vm.bbclass b/yocto-poky/meta/classes/image-vm.bbclass
index 5ddd1cb..47f7326 100644
--- a/yocto-poky/meta/classes/image-vm.bbclass
+++ b/yocto-poky/meta/classes/image-vm.bbclass
@@ -1,19 +1,30 @@
+# image-vm.bbclass
+# (loosely based on image-live.bbclass, Copyright (C) 2004, Advanced Micro Devices, Inc.)
+#
+# Create an image which can be placed directly onto a harddisk using dd and then
+# booted.
+#
+# This uses syslinux. extlinux would have been nice but required the ext2/3
+# partition to be mounted. grub needs to run itself as part of the install
+# process.
+#
+# The end result is a 512 byte boot sector populated with an MBR and partition table
+# followed by an msdos fat16 partition containing syslinux and a linux kernel,
+# completed by the ext2/3 rootfs.
+#
+# We have to push the msdos partition table size > 16MB so fat 16 is used as parted
+# won't touch fat12 partitions.
 
-SYSLINUX_PROMPT ?= "0"
-SYSLINUX_LABELS = "boot"
-LABELS_append = " ${SYSLINUX_LABELS} "
+inherit live-vm-common
 
-# Using an initramfs is optional. Enable it by setting INITRD_IMAGE.
-INITRD_IMAGE ?= ""
-INITRD ?= "${@'${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE}' else ''}"
-do_bootdirectdisk[depends] += "${@'${INITRD_IMAGE}:do_rootfs' if '${INITRD_IMAGE}' else ''}"
-
-# need to define the dependency and the ROOTFS for directdisk
-do_bootdirectdisk[depends] += "${PN}:do_rootfs"
-ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext4"
-
-# creating VM images relies on having a hddimg so ensure we inherit it here.
-inherit boot-directdisk
+do_bootdirectdisk[depends] += "dosfstools-native:do_populate_sysroot \
+                               virtual/kernel:do_deploy \
+                               syslinux:do_populate_sysroot \
+                               syslinux-native:do_populate_sysroot \
+                               parted-native:do_populate_sysroot \
+                               mtools-native:do_populate_sysroot \
+                               ${PN}:do_image_ext4 \
+                               "
 
 IMAGE_TYPEDEP_vmdk = "ext4"
 IMAGE_TYPEDEP_vdi = "ext4"
@@ -21,19 +32,133 @@
 IMAGE_TYPEDEP_hdddirect = "ext4"
 IMAGE_TYPES_MASKED += "vmdk vdi qcow2 hdddirect"
 
+ROOTFS ?= "${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.ext4"
+
+# Used by bootloader
+LABELS_VM ?= "boot"
+ROOT_VM ?= "root=/dev/sda2"
+# Using an initramfs is optional. Enable it by setting INITRD_IMAGE_VM.
+INITRD_IMAGE_VM ?= ""
+INITRD_VM ?= "${@'${DEPLOY_DIR_IMAGE}/${INITRD_IMAGE_VM}-${MACHINE}.cpio.gz' if '${INITRD_IMAGE_VM}' else ''}"
+do_bootdirectdisk[depends] += "${@'${INITRD_IMAGE_VM}:do_image_complete' if '${INITRD_IMAGE_VM}' else ''}"
+
+BOOTDD_VOLUME_ID   ?= "boot"
+BOOTDD_EXTRA_SPACE ?= "16384"
+
+DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
+DISK_SIGNATURE[vardepsexclude] = "DISK_SIGNATURE_GENERATED"
+
+build_boot_dd() {
+	HDDDIR="${S}/hdd/boot"
+	HDDIMG="${S}/hdd.image"
+	IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
+
+	populate_kernel $HDDDIR
+
+	if [ "${PCBIOS}" = "1" ]; then
+		syslinux_hddimg_populate $HDDDIR
+	fi
+	if [ "${EFI}" = "1" ]; then
+		efi_hddimg_populate $HDDDIR
+	fi
+
+	BLOCKS=`du -bks $HDDDIR | cut -f 1`
+	BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
+
+	# Ensure total sectors is an integral number of sectors per
+	# track or mcopy will complain. Sectors are 512 bytes, and we
+	# generate images with 32 sectors per track. This calculation is
+	# done in blocks, thus the mod by 16 instead of 32.
+	BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
+
+	# Remove it since mkdosfs would fail when it exists
+	rm -f $HDDIMG
+	mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS 
+	mcopy -i $HDDIMG -s $HDDDIR/* ::/
+
+	if [ "${PCBIOS}" = "1" ]; then
+		syslinux_hdddirect_install $HDDIMG
+	fi	
+	chmod 644 $HDDIMG
+
+	ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
+	TOTALSIZE=`expr $BLOCKS + $ROOTFSBLOCKS`
+	END1=`expr $BLOCKS \* 1024`
+	END2=`expr $END1 + 512`
+	END3=`expr \( $ROOTFSBLOCKS \* 1024 \) + $END1`
+
+	echo $ROOTFSBLOCKS $TOTALSIZE $END1 $END2 $END3
+	rm -rf $IMAGE
+	dd if=/dev/zero of=$IMAGE bs=1024 seek=$TOTALSIZE count=1
+
+	parted $IMAGE mklabel msdos
+	parted $IMAGE mkpart primary fat16 0 ${END1}B
+	parted $IMAGE unit B mkpart primary ext2 ${END2}B ${END3}B
+	parted $IMAGE set 1 boot on 
+
+	parted $IMAGE print
+
+	awk "BEGIN { printf \"$(echo ${DISK_SIGNATURE} | fold -w 2 | tac | paste -sd '' | sed 's/\(..\)/\\x&/g')\" }" | \
+		dd of=$IMAGE bs=1 seek=440 conv=notrunc
+
+	OFFSET=`expr $END2 / 512`
+	if [ "${PCBIOS}" = "1" ]; then
+		dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
+	fi
+
+	dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
+	dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
+
+	cd ${DEPLOY_DIR_IMAGE}
+	rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+	ln -s ${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
+} 
+
+python do_bootdirectdisk() {
+    validate_disk_signature(d)
+    set_live_vm_vars(d, 'VM')
+    if d.getVar("PCBIOS", True) == "1":
+        bb.build.exec_func('build_syslinux_cfg', d)
+    if d.getVar("EFI", True) == "1":
+        bb.build.exec_func('build_efi_cfg', d)
+    bb.build.exec_func('build_boot_dd', d)
+}
+
+def generate_disk_signature():
+    import uuid
+
+    signature = str(uuid.uuid4())[:8]
+
+    if signature != '00000000':
+        return signature
+    else:
+        return 'ffffffff'
+
+def validate_disk_signature(d):
+    import re
+
+    disk_signature = d.getVar("DISK_SIGNATURE", True)
+
+    if not re.match(r'^[0-9a-fA-F]{8}$', disk_signature):
+        bb.fatal("DISK_SIGNATURE '%s' must be an 8 digit hex string" % disk_signature)
+
+DISK_SIGNATURE_GENERATED := "${@generate_disk_signature()}"
+
+run_qemu_img (){
+    type="$1"
+    qemu-img convert -O $type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.$type
+    ln -sf ${IMAGE_NAME}.$type ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.$type
+}
 create_vmdk_image () {
-    qemu-img convert -O vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vmdk
-    ln -sf ${IMAGE_NAME}.vmdk ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vmdk
+    run_qemu_img vmdk
 }
 
 create_vdi_image () {
-    qemu-img convert -O vdi ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.vdi
-    ln -sf ${IMAGE_NAME}.vdi ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.vdi
+    run_qemu_img vdi
 }
 
 create_qcow2_image () {
-    qemu-img convert -O qcow2 ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.qcow2
-    ln -sf ${IMAGE_NAME}.qcow2 ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.qcow2
+    run_qemu_img qcow2
 }
 
 python do_vmimg() {
@@ -45,6 +170,6 @@
         bb.build.exec_func('create_qcow2_image', d)
 }
 
-addtask vmimg after do_bootdirectdisk before do_build
+addtask bootdirectdisk before do_vmimg
+addtask vmimg after do_bootdirectdisk before do_image_complete
 do_vmimg[depends] += "qemu-native:do_populate_sysroot"
-
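
DISK_SIGNATURE above is an 8-hex-digit MBR signature: it is generated from a UUID (avoiding the all-zero value), validated with a regex, byte-reversed to little-endian by the fold/tac/paste pipeline and written at offset 440 of the image by dd. A standalone Python sketch of the same steps is below; the seek()/write() here stands in for the awk-plus-dd pipeline the class actually uses.

    import re
    import uuid

    def generate_disk_signature():
        sig = str(uuid.uuid4())[:8]
        return sig if sig != '00000000' else 'ffffffff'

    def write_disk_signature(image_path, sig):
        if not re.match(r'^[0-9a-fA-F]{8}$', sig):
            raise ValueError("DISK_SIGNATURE %r must be an 8 digit hex string" % sig)
        little_endian = bytes.fromhex(sig)[::-1]   # same reordering as fold | tac | paste
        with open(image_path, 'r+b') as img:
            img.seek(440)                          # MBR disk signature offset
            img.write(little_endian)
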
diff --git a/yocto-poky/meta/classes/image.bbclass b/yocto-poky/meta/classes/image.bbclass
index d2f8105..8bfd241 100644
--- a/yocto-poky/meta/classes/image.bbclass
+++ b/yocto-poky/meta/classes/image.bbclass
@@ -1,6 +1,9 @@
 inherit rootfs_${IMAGE_PKGTYPE}
 
-inherit populate_sdk_ext
+# Only Linux SDKs support populate_sdk_ext, fall back to populate_sdk
+# in the non-Linux SDK_OS case, such as mingw32
+SDKEXTCLASS ?= "${@['populate_sdk', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS", True)]}"
+inherit ${SDKEXTCLASS}
 
 TOOLCHAIN_TARGET_TASK += "${PACKAGE_INSTALL}"
 TOOLCHAIN_TARGET_TASK_ATTEMPTONLY += "${PACKAGE_INSTALL_ATTEMPTONLY}"
@@ -30,6 +33,10 @@
 # rootfs bootstrap install
 ROOTFS_BOOTSTRAP_INSTALL = "${@bb.utils.contains("IMAGE_FEATURES", "package-management", "", "${ROOTFS_PKGMANAGE_BOOTSTRAP}",d)}"
 
+# These packages will be removed from a read-only rootfs after all other
+# packages have been installed
+ROOTFS_RO_UNNEEDED = "update-rc.d base-passwd shadow ${VIRTUAL-RUNTIME_update-alternatives} ${ROOTFS_BOOTSTRAP_INSTALL}"
+
 # packages to install from features
 FEATURE_INSTALL = "${@' '.join(oe.packagegroup.required_packages(oe.data.typed_value('IMAGE_FEATURES', d), d))}"
 FEATURE_INSTALL[vardepvalue] = "${FEATURE_INSTALL}"
@@ -94,43 +101,25 @@
 "
 do_rootfs[recrdeptask] += "do_packagedata"
 
-def command_variables(d):
-    return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
-            'IMAGE_PREPROCESS_COMMAND','ROOTFS_POSTPROCESS_COMMAND','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',         
-            'RPM_POSTPROCESS_COMMANDS']
+def rootfs_command_variables(d):
+    return ['ROOTFS_POSTPROCESS_COMMAND','ROOTFS_PREPROCESS_COMMAND','ROOTFS_POSTINSTALL_COMMAND','ROOTFS_POSTUNINSTALL_COMMAND','OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','IMAGE_POSTPROCESS_COMMAND',
+            'IMAGE_PREPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS','RPM_POSTPROCESS_COMMANDS','DEB_PREPROCESS_COMMANDS','DEB_POSTPROCESS_COMMANDS']
 
 python () {
-    variables = command_variables(d)
+    variables = rootfs_command_variables(d) + sdk_command_variables(d)
     for var in variables:
         if d.getVar(var, False):
             d.setVarFlag(var, 'func', '1')
 }
 
-def fstype_variables(d):
-    import oe.image
-
-    image = oe.image.Image(d)
-    alltypes, fstype_groups, cimages = image._get_image_types()
-    fstype_vars = set()
-    for fstype_group in fstype_groups:
-        for fstype in fstype_group:
-            fstype_vars.add('IMAGE_CMD_' + fstype)
-            if fstype in cimages:
-                for ctype in cimages[fstype]:
-                    fstype_vars.add('COMPRESS_CMD_' + ctype)
-
-    return sorted(fstype_vars)
-
 def rootfs_variables(d):
     from oe.rootfs import variable_depends
     variables = ['IMAGE_DEVICE_TABLES','BUILD_IMAGES_FROM_FEEDS','IMAGE_TYPES_MASKED','IMAGE_ROOTFS_ALIGNMENT','IMAGE_OVERHEAD_FACTOR','IMAGE_ROOTFS_SIZE','IMAGE_ROOTFS_EXTRA_SPACE',
-                 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS','SDK_OS',
-                 'SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT','SDKTARGETSYSROOT','MULTILIBRE_ALLOW_REP',
-                 'MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
-                 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
-                 'COMPRESSIONTYPES', 'IMAGE_GEN_DEBUGFS']
-    variables.extend(fstype_variables(d))
-    variables.extend(command_variables(d))
+                 'IMAGE_ROOTFS_MAXSIZE','IMAGE_NAME','IMAGE_LINK_NAME','IMAGE_MANIFEST','DEPLOY_DIR_IMAGE','RM_OLD_IMAGE','IMAGE_FSTYPES','IMAGE_INSTALL_COMPLEMENTARY','IMAGE_LINGUAS',
+                 'MULTILIBRE_ALLOW_REP','MULTILIB_TEMP_ROOTFS','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS',
+                 'PACKAGE_ARCHS','PACKAGE_CLASSES','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','OVERRIDES','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI','INTERCEPT_DIR','USE_DEVFS',
+                 'COMPRESSIONTYPES', 'IMAGE_GEN_DEBUGFS', 'ROOTFS_RO_UNNEEDED']
+    variables.extend(rootfs_command_variables(d))
     variables.extend(variable_depends(d))
     return " ".join(variables)
 
@@ -183,7 +172,7 @@
     initramfs_image = d.getVar('INITRAMFS_IMAGE', True) or ""
     if initramfs_image != "":
         d.appendVarFlag('do_build', 'depends', " %s:do_bundle_initramfs" %  d.getVar('PN', True))
-        d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_rootfs" % initramfs_image)
+        d.appendVarFlag('do_bundle_initramfs', 'depends', " %s:do_image_complete" % initramfs_image)
 }
 
 IMAGE_CLASSES += "image_types"
@@ -191,32 +180,6 @@
 
 IMAGE_POSTPROCESS_COMMAND ?= ""
 
-# Zap the root password if debug-tweaks feature is not enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
-
-# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
-
-# Enable postinst logging if debug-tweaks is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
-
-# Create /etc/timestamp during image construction to give a reasonably sane default time setting
-ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
-
-# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
-
-# Write manifest
-IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
-ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
-# Set default postinst log file
-POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
-# Set default target for systemd images
-SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
-ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
-
-ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
-
 # some default locales
 IMAGE_LINGUAS ?= "de-de fr-fr en-gb"
 
@@ -226,80 +189,16 @@
 # aren't yet available.
 PSEUDO_PASSWD = "${IMAGE_ROOTFS}:${STAGING_DIR_NATIVE}"
 
-do_rootfs[dirs] = "${TOPDIR}"
-do_rootfs[lockfiles] += "${IMAGE_ROOTFS}.lock"
-do_rootfs[cleandirs] += "${S}"
-
-# Must call real_do_rootfs() from inside here, rather than as a separate
-# task, so that we have a single fakeroot context for the whole process.
-do_rootfs[umask] = "022"
-
-# A hook function to support read-only-rootfs IMAGE_FEATURES
-read_only_rootfs_hook () {
-	# Tweak the mount option and fs_passno for rootfs in fstab
-	sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
-
-	# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
-	# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
-	# and the keys under /var/run/ssh.
-	if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
-		if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
-			echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
-			echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
-		else
-			echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
-			echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
-		fi
-	fi
-
-	# Also tweak the key location for dropbear in the same way.
-	if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
-		if [ -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
-			echo "DROPBEAR_RSAKEY_DIR=/etc/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
-		else
-			echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
-		fi
-	fi
-
-
-	if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
-		# Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
-		if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
-			sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
-		fi
-		# Run populate-volatile.sh at rootfs time to set up basic files
-		# and directories to support read-only rootfs.
-		if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
-			${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
-		fi
-	fi
-
-	if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
-	    # Update user database files so that services don't fail for a read-only systemd system
-	    for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
-		[ -e $conffile ] || continue
-		grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
-		    if [ "$type" = "u" ]; then
-			useradd_params=""
-			[ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
-			[ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
-			useradd_params="$useradd_params --system $name"
-			eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
-		    elif [ "$type" = "g" ]; then
-			groupadd_params=""
-			[ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
-			groupadd_params="$groupadd_params --system $name"
-			eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
-		    fi
-		done
-	    done
-	fi
-}
+inherit rootfs-postcommands
 
 PACKAGE_EXCLUDE ??= ""
 PACKAGE_EXCLUDE[type] = "list"
 
-python rootfs_process_ignore() {
+fakeroot python do_rootfs () {
+    from oe.rootfs import create_rootfs
+    from oe.manifest import create_manifest
+
+    # Handle package exclusions
     excl_pkgs = d.getVar("PACKAGE_EXCLUDE", True).split()
     inst_pkgs = d.getVar("PACKAGE_INSTALL", True).split()
     inst_attempt_pkgs = d.getVar("PACKAGE_INSTALL_ATTEMPTONLY", True).split()
@@ -318,194 +217,312 @@
 
     d.setVar("PACKAGE_INSTALL", ' '.join(inst_pkgs))
     d.setVar("PACKAGE_INSTALL_ATTEMPTONLY", ' '.join(inst_attempt_pkgs))
-}
-do_rootfs[prefuncs] += "rootfs_process_ignore"
 
-# We have to delay the runtime_mapping_rename until just before rootfs runs
-# otherwise, the multilib renaming could step in and squash any fixups that
-# may have occurred.
-python rootfs_runtime_mapping() {
+    # Ensure we handle package name remapping
+    # We have to delay the runtime_mapping_rename until just before rootfs runs
+    # otherwise, the multilib renaming could step in and squash any fixups that
+    # may have occurred.
     pn = d.getVar('PN', True)
     runtime_mapping_rename("PACKAGE_INSTALL", pn, d)
     runtime_mapping_rename("PACKAGE_INSTALL_ATTEMPTONLY", pn, d)
     runtime_mapping_rename("BAD_RECOMMENDATIONS", pn, d)
-}
-do_rootfs[prefuncs] += "rootfs_runtime_mapping"
 
-fakeroot python do_rootfs () {
-    from oe.rootfs import create_rootfs
-    from oe.image import create_image
-    from oe.manifest import create_manifest
-
-    # generate the initial manifest
+    # Generate the initial manifest
     create_manifest(d)
 
-    # generate rootfs
+    # Generate rootfs
     create_rootfs(d)
+}
+do_rootfs[dirs] = "${TOPDIR}"
+do_rootfs[cleandirs] += "${S}"
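+# umask 022 keeps files created during rootfs construction world-readable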
+do_rootfs[umask] = "022"
+addtask rootfs before do_build
 
-    # generate final images
-    create_image(d)
+fakeroot python do_image () {
+    from oe.utils import execute_pre_post_process
+
+    pre_process_cmds = d.getVar("IMAGE_PREPROCESS_COMMAND", True)
+
+    execute_pre_post_process(d, pre_process_cmds)
+}
+do_image[dirs] = "${TOPDIR}"
+do_image[umask] = "022"
+addtask do_image after do_rootfs before do_build
+
+fakeroot python do_image_complete () {
+    from oe.utils import execute_pre_post_process
+
+    post_process_cmds = d.getVar("IMAGE_POSTPROCESS_COMMAND", True)
+
+    execute_pre_post_process(d, post_process_cmds)
+}
+do_image_complete[dirs] = "${TOPDIR}"
+do_image_complete[umask] = "022"
+addtask do_image_complete after do_image before do_build
+
+#
+# Write environment variables used by wic
+# to tmp/sysroots/<machine>/imgdata/<image>.env
+#
+python do_rootfs_wicenv () {
+    wicvars = d.getVar('WICVARS', True)
+    if not wicvars:
+        return
+
+    stdir = d.getVar('STAGING_DIR_TARGET', True)
+    outdir = os.path.join(stdir, 'imgdata')
+    bb.utils.mkdirhier(outdir)
+    basename = d.getVar('IMAGE_BASENAME', True)
+    with open(os.path.join(outdir, basename) + '.env', 'w') as envf:
+        for var in wicvars.split():
+            value = d.getVar(var, True)
+            if value:
+                envf.write('%s="%s"\n' % (var, value.strip()))
+}
+addtask do_rootfs_wicenv after do_image before do_image_wic
+do_rootfs_wicenv[vardeps] += "${WICVARS}"
+do_rootfs_wicenv[prefuncs] = 'set_image_size'
+
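+# Point the image variables at the companion debug filesystem (-dbg) outputs
+# so a separate debugfs image can be generated alongside the normal one.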
+def setup_debugfs_variables(d):
+    d.appendVar('IMAGE_ROOTFS', '-dbg')
+    d.appendVar('IMAGE_LINK_NAME', '-dbg')
+    d.appendVar('IMAGE_NAME','-dbg')
+    debugfs_image_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True)
+    if debugfs_image_fstypes:
+        d.setVar('IMAGE_FSTYPES', debugfs_image_fstypes)
+
+python setup_debugfs () {
+    setup_debugfs_variables(d)
 }
 
-insert_feed_uris () {
-	
-	echo "Building feeds for [${DISTRO}].."
+python () {
+    vardeps = set()
+    ctypes = d.getVar('COMPRESSIONTYPES', True).split()
+    old_overrides = d.getVar('OVERRIDES', 0)
 
-	for line in ${FEED_URIS}
-	do
-		# strip leading and trailing spaces/tabs, then split into name and uri
-		line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
-		feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
-		feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
-		
-		echo "Added $feed_name feed with URL $feed_uri"
-		
-		# insert new feed-sources
-		echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
-	done
+    def _image_base_type(type):
+        basetype = type
+        for ctype in ctypes:
+            if type.endswith("." + ctype):
+                basetype = type[:-len("." + ctype)]
+                break
+
+        if basetype != type:
+            # New base type itself might be generated by a conversion command.
+            basetype = _image_base_type(basetype)
+
+        return basetype
+
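+    # basetypes maps every base image type to the requested types built from
+    # it (e.g. ext4 -> [ext4, ext4.xz]); typedeps records dependencies between
+    # base types derived from IMAGE_TYPEDEP_*.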
+    basetypes = {}
+    alltypes = d.getVar('IMAGE_FSTYPES', True).split()
+    typedeps = {}
+
+    if d.getVar('IMAGE_GEN_DEBUGFS', True) == "1":
+        debugfs_fstypes = d.getVar('IMAGE_FSTYPES_DEBUGFS', True).split()
+        for t in debugfs_fstypes:
+            alltypes.append("debugfs_" + t)
+
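+    # Register a type (handling the debugfs_ prefix) and recursively pull in
+    # everything listed in its IMAGE_TYPEDEP_<type> dependencies.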
+    def _add_type(t):
+        baset = _image_base_type(t)
+        input_t = t
+        if baset not in basetypes:
+            basetypes[baset]= []
+        if t not in basetypes[baset]:
+            basetypes[baset].append(t)
+        debug = ""
+        if t.startswith("debugfs_"):
+            t = t[8:]
+            debug = "debugfs_"
+        deps = (d.getVar('IMAGE_TYPEDEP_' + t, True) or "").split()
+        vardeps.add('IMAGE_TYPEDEP_' + t)
+        if baset not in typedeps:
+            typedeps[baset] = set()
+        deps = [debug + dep for dep in deps]
+        for dep in deps:
+            if dep not in alltypes:
+                alltypes.append(dep)
+            _add_type(dep)
+            basedep = _image_base_type(dep)
+            typedeps[baset].add(basedep)
+
+        if baset != input_t:
+            _add_type(baset)
+
+    for t in alltypes[:]:
+        _add_type(t)
+
+    d.appendVarFlag('do_image', 'vardeps', ' '.join(vardeps))
+
+    maskedtypes = (d.getVar('IMAGE_TYPES_MASKED', True) or "").split()
+
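+    # Emit a fakeroot do_image_<type> task for each unmasked base type,
+    # scheduled between do_image and do_image_complete.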
+    for t in basetypes:
+        vardeps = set()
+        cmds = []
+        subimages = []
+        realt = t
+
+        if t in maskedtypes:
+            continue
+
+        localdata = bb.data.createCopy(d)
+        debug = ""
+        if t.startswith("debugfs_"):
+            setup_debugfs_variables(localdata)
+            debug = "setup_debugfs "
+            realt = t[8:]
+        localdata.setVar('OVERRIDES', '%s:%s' % (realt, old_overrides))
+        bb.data.update_data(localdata)
+        localdata.setVar('type', realt)
+        # Delete DATETIME so we don't expand any references to it now
+        # This means the task's hash can be stable rather than having hardcoded
+        # date/time values. It will get expanded at execution time.
+        # Similarly TMPDIR since otherwise we see QA stamp comparison problems
+        localdata.delVar('DATETIME')
+        localdata.delVar('TMPDIR')
+
+        image_cmd = localdata.getVar("IMAGE_CMD", True)
+        vardeps.add('IMAGE_CMD_' + realt)
+        if image_cmd:
+            cmds.append("\t" + image_cmd)
+        else:
+            bb.fatal("No IMAGE_CMD defined for IMAGE_FSTYPES entry '%s' - possibly invalid type name or missing support class" % t)
+        cmds.append(localdata.expand("\tcd ${DEPLOY_DIR_IMAGE}"))
+
+        rm_tmp_images = set()
+        def gen_conversion_cmds(bt):
+            for ctype in ctypes:
+                if bt.endswith("." + ctype):
+                    type = bt[0:-len(ctype) - 1]
+                    if type.startswith("debugfs_"):
+                        type = type[8:]
+                    # Create input image first.
+                    gen_conversion_cmds(type)
+                    localdata.setVar('type', type)
+                    cmds.append("\t" + localdata.getVar("COMPRESS_CMD_" + ctype, True))
+                    vardeps.add('COMPRESS_CMD_' + ctype)
+                    subimages.append(type + "." + ctype)
+                    if type not in alltypes:
+                        rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
+
+        for bt in basetypes[t]:
+            gen_conversion_cmds(bt)
+
+        localdata.setVar('type', realt)
+        if t not in alltypes:
+            rm_tmp_images.add(localdata.expand("${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"))
+        else:
+            subimages.append(realt)
+
+        # Clean up after applying all conversion commands. Some of them might
+        # use the same input; therefore we cannot delete it sooner without
+        # applying some complex dependency analysis.
+        for image in rm_tmp_images:
+            cmds.append("\trm " + image)
+
+        after = 'do_image'
+        for dep in typedeps[t]:
+            after += ' do_image_%s' % dep.replace("-", "_").replace(".", "_")
+
+        t = t.replace("-", "_").replace(".", "_")
+
+        d.setVar('do_image_%s' % t, '\n'.join(cmds))
+        d.setVarFlag('do_image_%s' % t, 'func', '1')
+        d.setVarFlag('do_image_%s' % t, 'fakeroot', '1')
+        d.setVarFlag('do_image_%s' % t, 'prefuncs', debug + 'set_image_size')
+        d.setVarFlag('do_image_%s' % t, 'postfuncs', 'create_symlinks')
+        d.setVarFlag('do_image_%s' % t, 'subimages', ' '.join(subimages))
+        d.appendVarFlag('do_image_%s' % t, 'vardeps', ' '.join(vardeps))
+        d.appendVarFlag('do_image_%s' % t, 'vardepsexclude', 'DATETIME')
+
+        bb.debug(2, "Adding type %s before %s, after %s" % (t, 'do_image_complete', after))
+        bb.build.addtask('do_image_%s' % t, 'do_image_complete', after, d)
+}
+
+#
+# Compute the rootfs size
+#
+def get_rootfs_size(d):
+    import subprocess
+
+    rootfs_alignment = int(d.getVar('IMAGE_ROOTFS_ALIGNMENT', True))
+    overhead_factor = float(d.getVar('IMAGE_OVERHEAD_FACTOR', True))
+    rootfs_req_size = int(d.getVar('IMAGE_ROOTFS_SIZE', True))
+    rootfs_extra_space = eval(d.getVar('IMAGE_ROOTFS_EXTRA_SPACE', True))
+    rootfs_maxsize = d.getVar('IMAGE_ROOTFS_MAXSIZE', True)
+    image_fstypes = d.getVar('IMAGE_FSTYPES', True) or ''
+    initramfs_fstypes = d.getVar('INITRAMFS_FSTYPES', True) or ''
+    initramfs_maxsize = d.getVar('INITRAMFS_MAXSIZE', True)
+
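+    # Start from the actual on-disk size of the rootfs, in kilobytes.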
+    output = subprocess.check_output(['du', '-ks',
+                                      d.getVar('IMAGE_ROOTFS', True)])
+    size_kb = int(output.split()[0])
+    base_size = size_kb * overhead_factor
+    base_size = max(base_size, rootfs_req_size) + rootfs_extra_space
+
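+    # Round any fractional size up to the next whole kilobyte.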
+    if base_size != int(base_size):
+        base_size = int(base_size + 1)
+    else:
+        base_size = int(base_size)
+
+    base_size += rootfs_alignment - 1
+    base_size -= base_size % rootfs_alignment
+
+    # Check the rootfs size against IMAGE_ROOTFS_MAXSIZE (if set)
+    if rootfs_maxsize:
+        rootfs_maxsize_int = int(rootfs_maxsize)
+        if base_size > rootfs_maxsize_int:
+            bb.fatal("The rootfs size %d(K) overrides IMAGE_ROOTFS_MAXSIZE: %d(K)" % \
+                (base_size, rootfs_maxsize_int))
+
+    # Check the initramfs size against INITRAMFS_MAXSIZE (if set)
+    if image_fstypes == initramfs_fstypes != '' and initramfs_maxsize:
+        initramfs_maxsize_int = int(initramfs_maxsize)
+        if base_size > initramfs_maxsize_int:
+            bb.error("The initramfs size %d(K) overrides INITRAMFS_MAXSIZE: %d(K)" % \
+                (base_size, initramfs_maxsize_int))
+            bb.error("You can set INITRAMFS_MAXSIZE a larger value. Usually, it should")
+            bb.fatal("be less than 1/2 of ram size, or you may fail to boot it.\n")
+    return base_size
+
+python set_image_size () {
+        rootfs_size = get_rootfs_size(d)
+        d.setVar('ROOTFS_SIZE', str(rootfs_size))
+        d.setVarFlag('ROOTFS_SIZE', 'export', '1')
+}
+
+#
+# Create symlinks to the newly created image
+#
+python create_symlinks() {
+
+    deploy_dir = d.getVar('DEPLOY_DIR_IMAGE', True)
+    img_name = d.getVar('IMAGE_NAME', True)
+    link_name = d.getVar('IMAGE_LINK_NAME', True)
+    manifest_name = d.getVar('IMAGE_MANIFEST', True)
+    taskname = d.getVar("BB_CURRENTTASK", True)
+    subimages = (d.getVarFlag("do_" + taskname, 'subimages', False) or "").split()
+    imgsuffix = d.getVarFlag("do_" + taskname, 'imgsuffix', True) or d.expand("${IMAGE_NAME_SUFFIX}.")
+    os.chdir(deploy_dir)
+
+    if not link_name:
+        return
+    for type in subimages:
+        if os.path.exists(img_name + imgsuffix + type):
+            dst = deploy_dir + "/" + link_name + "." + type
+            src = img_name + imgsuffix + type
+            bb.note("Creating symlink: %s -> %s" % (dst, src))
+            if os.path.islink(dst):
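+                # With RM_OLD_IMAGE enabled, also delete the image the stale
+                # symlink pointed at before replacing the link.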
+                if d.getVar('RM_OLD_IMAGE', True) == "1" and \
+                        os.path.exists(os.path.realpath(dst)):
+                    os.remove(os.path.realpath(dst))
+                os.remove(dst)
+            os.symlink(src, dst)
 }
 
 MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|${sysconfdir}|${nonarch_base_libdir}/udev|/lib/modules/[^/]*/modules.*|"
 MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
 MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
 
-# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
-zap_empty_root_password () {
-	if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
-		sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
-        fi
-	if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
-		sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
-	fi
-} 
-
-# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
-ssh_allow_empty_password () {
-	if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
-		sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
-		sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
-	fi
-
-	if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
-		if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
-			if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
-				sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
-			fi
-		else
-			printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
-		fi
-	fi
-
-	if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
-		sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
-	fi
-}
-
-# Disable DNS lookups, the SSH_DISABLE_DNS_LOOKUP can be overridden to allow
-# distros to choose not to take this change
-SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
-ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
-ssh_disable_dns_lookup () {
-	if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
-		sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
-	fi
-}
-
-# Enable postinst logging if debug-tweaks is enabled
-postinst_enable_logging () {
-	mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
-	echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
-	echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
-}
-
-# Modify systemd default target
-set_systemd_default_target () {
-	if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
-		ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
-	fi
-}
-
-# If /var/volatile is not empty, we have seen problems where programs such as the
-# journal make assumptions based on the contents of /var/volatile. The journal
-# would then write to /var/volatile before it was mounted, thus hiding the
-# items previously written.
-#
-# This change is to attempt to fix those types of issues in a way that doesn't
-# affect users that may not be using /var/volatile.
-empty_var_volatile () {
-	if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
-		match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
-		if [ -n "$match" ]; then
-			find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
-		fi
-	fi
-}
-
-# Turn any symbolic /sbin/init link into a file
-remove_init_link () {
-	if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
-		LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
-		rm ${IMAGE_ROOTFS}/sbin/init
-		cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
-	fi
-}
-
-make_zimage_symlink_relative () {
-	if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
-		(cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
-	fi
-}
-
-python write_image_manifest () {
-    from oe.rootfs import image_list_installed_packages
-    with open(d.getVar('IMAGE_MANIFEST', True), 'w+') as image_manifest:
-        image_manifest.write(image_list_installed_packages(d, 'ver'))
-        image_manifest.write("\n")
-}
-
-# Can be use to create /etc/timestamp during image construction to give a reasonably 
-# sane default time setting
-rootfs_update_timestamp () {
-	date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
-}
-
-# Prevent X from being started
-rootfs_no_x_startup () {
-	if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
-		chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
-	fi
-}
-
-rootfs_trim_schemas () {
-	for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
-	do
-		# Need this in case no files exist
-		if [ -e $schema ]; then
-			oe-trim-schemas $schema > $schema.new
-			mv $schema.new $schema
-		fi
-	done
-}
-
-rootfs_check_host_user_contaminated () {
-	contaminated="${WORKDIR}/host-user-contaminated.txt"
-	HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
-	HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
-
-	find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \
-	    -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated"
-
-	if [ -s "$contaminated" ]; then
-		echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths."
-		cat "$contaminated" | sed "s,^,  ,"
-	fi
-}
-
-# Make any absolute links in a sysroot relative
-rootfs_sysroot_relativelinks () {
-	sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
-}
-
 do_fetch[noexec] = "1"
 do_unpack[noexec] = "1"
 do_patch[noexec] = "1"
@@ -520,7 +537,6 @@
 do_package_write_deb[noexec] = "1"
 do_package_write_rpm[noexec] = "1"
 
-addtask rootfs before do_build
 # Allow the kernel to be repacked with the initramfs and boot image file as a single file
 do_bundle_initramfs[depends] += "virtual/kernel:do_bundle_initramfs"
 do_bundle_initramfs[nostamp] = "1"
@@ -528,4 +544,4 @@
 do_bundle_initramfs () {
 	:
 }
-addtask bundle_initramfs after do_rootfs
+addtask bundle_initramfs after do_image_complete
diff --git a/yocto-poky/meta/classes/image_types.bbclass b/yocto-poky/meta/classes/image_types.bbclass
index 5036919..53af7ca 100644
--- a/yocto-poky/meta/classes/image_types.bbclass
+++ b/yocto-poky/meta/classes/image_types.bbclass
@@ -1,3 +1,8 @@
+# IMAGE_NAME is the base name for everything produced when building images.
+# The actual image that contains the rootfs has an additional suffix (.rootfs
+# by default) followed by additional suffices which describe the format (.ext4,
+# .ext4.xz, etc.).
+IMAGE_NAME_SUFFIX ??= ".rootfs"
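+# e.g. <image>-<machine>-<datetime>.rootfs.ext4.xz (illustrative name)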
 
 # The default alignment of the size of the rootfs is set to 1KiB. In case
 # you're using the SD card emulation of a QEMU system simulator you may
@@ -12,7 +17,9 @@
 
     deps = []
     ctypes = d.getVar('COMPRESSIONTYPES', True).split()
-    for type in (d.getVar('IMAGE_FSTYPES', True) or "").split():
+    fstypes = set((d.getVar('IMAGE_FSTYPES', True) or "").split())
+    fstypes |= set((d.getVar('IMAGE_FSTYPES_DEBUGFS', True) or "").split())
+    for type in fstypes:
         if type in ["vmdk", "vdi", "qcow2", "hdddirect", "live", "iso", "hddimg"]:
             type = "ext4"
         basetype = type
@@ -36,9 +43,9 @@
 XZ_THREADS ?= "-T 0"
 
 JFFS2_SUM_EXTRA_ARGS ?= ""
-IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 ${EXTRA_IMAGECMD}"
+IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.jffs2 ${EXTRA_IMAGECMD}"
 
-IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cramfs ${EXTRA_IMAGECMD}"
+IMAGE_CMD_cramfs = "mkfs.cramfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cramfs ${EXTRA_IMAGECMD}"
 
 oe_mkext234fs () {
 	fstype=$1
@@ -58,8 +65,8 @@
 		eval COUNT=\"$MIN_COUNT\"
 	fi
 	# Create a sparse image block
-	dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
-	mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.$fstype -d ${IMAGE_ROOTFS}
+	dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype seek=$ROOTFS_SIZE count=$COUNT bs=1024
+	mkfs.$fstype -F $extra_imagecmd ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.$fstype -d ${IMAGE_ROOTFS}
 }
 
 IMAGE_CMD_ext2 = "oe_mkext234fs ext2 ${EXTRA_IMAGECMD}"
@@ -69,16 +76,16 @@
 MIN_BTRFS_SIZE ?= "16384"
 IMAGE_CMD_btrfs () {
 	if [ ${ROOTFS_SIZE} -gt ${MIN_BTRFS_SIZE} ]; then
-		dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs count=${ROOTFS_SIZE} bs=1024
-		mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.btrfs
+		dd if=/dev/zero of=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs count=${ROOTFS_SIZE} bs=1024
+		mkfs.btrfs ${EXTRA_IMAGECMD} -r ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.btrfs
 	else
 		bbfatal "Rootfs is too small for BTRFS (Rootfs Actual Size: ${ROOTFS_SIZE}, BTRFS Minimum Size: ${MIN_BTRFS_SIZE})"
 	fi
 }
 
-IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs ${EXTRA_IMAGECMD} -noappend"
-IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
-IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
+IMAGE_CMD_squashfs = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs ${EXTRA_IMAGECMD} -noappend"
+IMAGE_CMD_squashfs-xz = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-xz ${EXTRA_IMAGECMD} -noappend -comp xz"
+IMAGE_CMD_squashfs-lzo = "mksquashfs ${IMAGE_ROOTFS} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.squashfs-lzo ${EXTRA_IMAGECMD} -noappend -comp lzo"
 
 # By default, tar from the host is used, which can be quite old. If
 # you need special parameters (like --xattrs) which are only supported
@@ -91,28 +98,29 @@
 # In practice, it turned out to be not needed when creating archives and
 # required when extracting, but it seems prudent to use it in both cases.
 IMAGE_CMD_TAR ?= "tar"
-IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.tar -C ${IMAGE_ROOTFS} ."
+IMAGE_CMD_tar = "${IMAGE_CMD_TAR} -cvf ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.tar -C ${IMAGE_ROOTFS} ."
 
-do_rootfs[cleandirs] += "${WORKDIR}/cpio_append"
+do_image_cpio[cleandirs] += "${WORKDIR}/cpio_append"
 IMAGE_CMD_cpio () {
-	(cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
+	(cd ${IMAGE_ROOTFS} && find . | cpio -o -H newc >${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
 	if [ ! -L ${IMAGE_ROOTFS}/init -a ! -e ${IMAGE_ROOTFS}/init ]; then
 		if [ -L ${IMAGE_ROOTFS}/sbin/init -o -e ${IMAGE_ROOTFS}/sbin/init ]; then
 			ln -sf /sbin/init ${WORKDIR}/cpio_append/init
 		else
 			touch ${WORKDIR}/cpio_append/init
 		fi
-		(cd  ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio)
+		(cd  ${WORKDIR}/cpio_append && echo ./init | cpio -oA -H newc -F ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.cpio)
 	fi
 }
 
-ELF_KERNEL ?= "${STAGING_DIR_HOST}/usr/src/kernel/${KERNEL_IMAGETYPE}"
+ELF_KERNEL ?= "${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE}"
 ELF_APPEND ?= "ramdisk_size=32768 root=/dev/ram0 rw console="
 
 IMAGE_CMD_elf () {
-	test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf
-	mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
+	test -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf && rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf
+	mkelfImage --kernel=${ELF_KERNEL} --initrd=${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.cpio.gz --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.elf --append='${ELF_APPEND}' ${EXTRA_IMAGECMD}
 }
+
 IMAGE_TYPEDEP_elf = "cpio.gz"
 
 UBI_VOLNAME ?= "${MACHINE}-rootfs"
@@ -126,28 +134,28 @@
 		local vname="_$3"
 	fi
 
-	echo \[ubifs\] > ubinize${vname}.cfg
-	echo mode=ubi >> ubinize${vname}.cfg
-	echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}.rootfs.ubifs >> ubinize${vname}.cfg
-	echo vol_id=0 >> ubinize${vname}.cfg
-	echo vol_type=dynamic >> ubinize${vname}.cfg
-	echo vol_name=${UBI_VOLNAME} >> ubinize${vname}.cfg
-	echo vol_flags=autoresize >> ubinize${vname}.cfg
-	mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}.rootfs.ubifs ${mkubifs_args}
-	ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}.rootfs.ubi ${ubinize_args} ubinize${vname}.cfg
+	echo \[ubifs\] > ubinize${vname}-${IMAGE_NAME}.cfg
+	echo mode=ubi >> ubinize${vname}-${IMAGE_NAME}.cfg
+	echo image=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs >> ubinize${vname}-${IMAGE_NAME}.cfg
+	echo vol_id=0 >> ubinize${vname}-${IMAGE_NAME}.cfg
+	echo vol_type=dynamic >> ubinize${vname}-${IMAGE_NAME}.cfg
+	echo vol_name=${UBI_VOLNAME} >> ubinize${vname}-${IMAGE_NAME}.cfg
+	echo vol_flags=autoresize >> ubinize${vname}-${IMAGE_NAME}.cfg
+	mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ${mkubifs_args}
+	ubinize -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ${ubinize_args} ubinize${vname}-${IMAGE_NAME}.cfg
 
 	# Cleanup cfg file
-	mv ubinize${vname}.cfg ${DEPLOY_DIR_IMAGE}/
+	mv ubinize${vname}-${IMAGE_NAME}.cfg ${DEPLOY_DIR_IMAGE}/
 
 	# Create own symlinks for 'named' volumes
 	if [ -n "$vname" ]; then
 		cd ${DEPLOY_DIR_IMAGE}
-		if [ -e ${IMAGE_NAME}${vname}.rootfs.ubifs ]; then
-			ln -sf ${IMAGE_NAME}${vname}.rootfs.ubifs \
+		if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs ]; then
+			ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubifs \
 			${IMAGE_LINK_NAME}${vname}.ubifs
 		fi
-		if [ -e ${IMAGE_NAME}${vname}.rootfs.ubi ]; then
-			ln -sf ${IMAGE_NAME}${vname}.rootfs.ubi \
+		if [ -e ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi ]; then
+			ln -sf ${IMAGE_NAME}${vname}${IMAGE_NAME_SUFFIX}.ubi \
 			${IMAGE_LINK_NAME}${vname}.ubi
 		fi
 		cd -
@@ -168,17 +176,39 @@
 	multiubi_mkfs "${MKUBIFS_ARGS}" "${UBINIZE_ARGS}"
 }
 
-IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.ubifs ${MKUBIFS_ARGS}"
+IMAGE_CMD_ubifs = "mkfs.ubifs -r ${IMAGE_ROOTFS} -o ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.ubifs ${MKUBIFS_ARGS}"
+
+WKS_FILE ?= "${IMAGE_BASENAME}.${MACHINE}.wks"
+WKS_FILES ?= "${WKS_FILE} ${IMAGE_BASENAME}.wks"
+WKS_SEARCH_PATH ?= "${THISDIR}:${@':'.join('%s/scripts/lib/wic/canned-wks' % l for l in '${BBPATH}:${COREBASE}'.split(':'))}"
+WKS_FULL_PATH = "${@wks_search('${WKS_FILES}'.split(), '${WKS_SEARCH_PATH}') or ''}"
+
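+# Return the first kickstart file in 'files' that exists, either as an
+# absolute path or located on the colon-separated 'search_path'.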
+def wks_search(files, search_path):
+    for f in files:
+        if os.path.isabs(f):
+            if os.path.exists(f):
+                return f
+        else:
+            searched = bb.utils.which(search_path, f)
+            if searched:
+                return searched
 
 IMAGE_CMD_wic () {
-	out=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}
-	wks=${FILE_DIRNAME}/${IMAGE_BASENAME}.${MACHINE}.wks
-	[ -e $wks ] || wks=${FILE_DIRNAME}/${IMAGE_BASENAME}.wks
-	[ -e $wks ] || bbfatal "Kiskstart file $wks doesn't exist"
-	BUILDDIR=${TOPDIR} wic create $wks --vars ${STAGING_DIR_TARGET}/imgdata/ -e ${IMAGE_BASENAME} -o $out/
-	mv $out/build/${IMAGE_BASENAME}*.direct $out.rootfs.wic
-	rm -rf $out/
+	out="${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}"
+	wks="${WKS_FULL_PATH}"
+	if [ -z "$wks" ]; then
+		bbfatal "No kickstart files from WKS_FILES were found: ${WKS_FILES}. Please set WKS_FILE or WKS_FILES appropriately."
+	fi
+
+	BUILDDIR="${TOPDIR}" wic create "$wks" --vars "${STAGING_DIR_TARGET}/imgdata/" -e "${IMAGE_BASENAME}" -o "$out/"
+	mv "$out/build/$(basename "${wks%.wks}")"*.direct "$out${IMAGE_NAME_SUFFIX}.wic"
+	rm -rf "$out/"
 }
+IMAGE_CMD_wic[vardepsexclude] = "WKS_FULL_PATH WKS_FILES"
+
+# Rebuild when the wks file or vars in WICVARS change
+USING_WIC = "${@bb.utils.contains_any('IMAGE_FSTYPES', 'wic ' + ' '.join('wic.%s' % c for c in '${COMPRESSIONTYPES}'.split()), '1', '', d)}"
+do_image_wic[file-checksums] += "${@'${WKS_FULL_PATH}:%s' % os.path.exists('${WKS_FULL_PATH}') if '${USING_WIC}' else ''}"
 
 EXTRA_IMAGECMD = ""
 
@@ -191,7 +221,7 @@
 EXTRA_IMAGECMD_ext2 ?= "-i 4096"
 EXTRA_IMAGECMD_ext3 ?= "-i 4096"
 EXTRA_IMAGECMD_ext4 ?= "-i 4096"
-EXTRA_IMAGECMD_btrfs ?= ""
+EXTRA_IMAGECMD_btrfs ?= "-n 4096"
 EXTRA_IMAGECMD_elf ?= ""
 
 IMAGE_DEPENDS = ""
@@ -232,13 +262,19 @@
     wic wic.gz wic.bz2 wic.lzma \
 "
 
-COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum"
-COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}.rootfs.${type}"
-COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.gz"
-COMPRESS_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}.rootfs.${type}"
-COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.xz"
-COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}.rootfs.${type} > ${IMAGE_NAME}.rootfs.${type}.lz4"
-COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}.rootfs.${type} -o ${IMAGE_NAME}.rootfs.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+COMPRESSIONTYPES = "gz bz2 lzma xz lz4 sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum"
+COMPRESS_CMD_lzma = "lzma -k -f -7 ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+COMPRESS_CMD_gz = "gzip -f -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.gz"
+COMPRESS_CMD_bz2 = "pbzip2 -f -k ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}"
+COMPRESS_CMD_xz = "xz -f -k -c ${XZ_COMPRESSION_LEVEL} ${XZ_THREADS} --check=${XZ_INTEGRITY_CHECK} ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.xz"
+COMPRESS_CMD_lz4 = "lz4c -9 -c ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.lz4"
+COMPRESS_CMD_sum = "sumtool -i ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} -o ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sum ${JFFS2_SUM_EXTRA_ARGS}"
+COMPRESS_CMD_md5sum = "md5sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.md5sum"
+COMPRESS_CMD_sha1sum = "sha1sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha1sum"
+COMPRESS_CMD_sha224sum = "sha224sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha224sum"
+COMPRESS_CMD_sha256sum = "sha256sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha256sum"
+COMPRESS_CMD_sha384sum = "sha384sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha384sum"
+COMPRESS_CMD_sha512sum = "sha512sum ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type} > ${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.${type}.sha512sum"
 COMPRESS_DEPENDS_lzma = "xz-native"
 COMPRESS_DEPENDS_gz = ""
 COMPRESS_DEPENDS_bz2 = "pbzip2-native"
diff --git a/yocto-poky/meta/classes/image_types_uboot.bbclass b/yocto-poky/meta/classes/image_types_uboot.bbclass
index 081bca2..19e4aa2 100644
--- a/yocto-poky/meta/classes/image_types_uboot.bbclass
+++ b/yocto-poky/meta/classes/image_types_uboot.bbclass
@@ -22,5 +22,5 @@
 COMPRESS_DEPENDS_lzma.u-boot = "u-boot-mkimage-native"
 COMPRESS_CMD_lzma.u-boot      = "${COMPRESS_CMD_lzma}; oe_mkimage ${IMAGE_NAME}.rootfs.${type}.lzma lzma clean"
 
-IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot"
+IMAGE_TYPES += "ext2.u-boot ext2.gz.u-boot ext2.bz2.u-boot ext2.lzma.u-boot ext3.gz.u-boot ext4.gz.u-boot cpio.gz.u-boot"
 
diff --git a/yocto-poky/meta/classes/insane.bbclass b/yocto-poky/meta/classes/insane.bbclass
index a77438d..c57b217 100644
--- a/yocto-poky/meta/classes/insane.bbclass
+++ b/yocto-poky/meta/classes/insane.bbclass
@@ -38,6 +38,7 @@
             perms dep-cmp pkgvarcheck perm-config perm-line perm-link \
             split-strip packages-list pkgv-undefined var-undefined \
             version-going-backwards expanded-d invalid-chars \
+            license-checksum dev-elf \
             "
 FAKEROOT_QA = "host-user-contaminated"
 FAKEROOT_QA[doc] = "QA tests which need to run under fakeroot. If any \
@@ -45,7 +46,7 @@
 
 ALL_QA = "${WARN_QA} ${ERROR_QA}"
 
-UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot"
+UNKNOWN_CONFIGURE_WHITELIST ?= "--enable-nls --disable-nls --disable-silent-rules --disable-dependency-tracking --with-libtool-sysroot --disable-static"
 
 #
 # dictionary for elf headers
@@ -126,6 +127,9 @@
                         "mipsel":     (   8,     0,    0,          True,          32),
                         "mips64":     (   8,     0,    0,          False,         64),
                         "mips64el":   (   8,     0,    0,          True,          64),
+                        "microblaze":  (189,     0,    0,          False,         32),
+                        "microblazeeb":(189,     0,    0,          False,         32),
+                        "microblazeel":(189,     0,    0,          True,          32),
                       },
             "uclinux-uclibc" : {
                         "bfin":       ( 106,     0,    0,          True,         32),
@@ -189,6 +193,12 @@
         bb.note("QA Issue: %s [%s]" % (error_msg, error_class))
     return True
 
+def package_qa_add_message(messages, section, new_msg):
+    if section not in messages:
+        messages[section] = new_msg
+    else:
+        messages[section] = messages[section] + "\n" + new_msg
+
 QAPATHTEST[libexec] = "package_qa_check_libexec"
 def package_qa_check_libexec(path,name, d, elf, messages):
 
@@ -198,7 +208,7 @@
         return True
 
     if 'libexec' in path.split(os.path.sep):
-        messages["libexec"] = "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec)
+        package_qa_add_message(messages, "libexec", "%s: %s is using libexec please relocate to %s" % (name, package_qa_clean_path(path, d), libexec))
         return False
 
     return True
@@ -226,7 +236,7 @@
             rpath = m.group(1)
             for dir in bad_dirs:
                 if dir in rpath:
-                    messages["rpaths"] = "package %s contains bad RPATH %s in file %s" % (name, rpath, file)
+                    package_qa_add_message(messages, "rpaths", "package %s contains bad RPATH %s in file %s" % (name, rpath, file))
 
 QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
 def package_qa_check_useless_rpaths(file, name, d, elf, messages):
@@ -256,7 +266,7 @@
             if rpath_eq(rpath, libdir) or rpath_eq(rpath, base_libdir):
                 # The dynamic linker searches both these places anyway.  There is no point in
                 # looking there again.
-                messages["useless-rpaths"] = "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath)
+                package_qa_add_message(messages, "useless-rpaths", "%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
 
 QAPATHTEST[dev-so] = "package_qa_check_dev"
 def package_qa_check_dev(path, name, d, elf, messages):
@@ -265,8 +275,19 @@
     """
 
     if not name.endswith("-dev") and not name.endswith("-dbg") and not name.endswith("-ptest") and not name.startswith("nativesdk-") and path.endswith(".so") and os.path.islink(path):
-        messages["dev-so"] = "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
-                 (name, package_qa_clean_path(path,d))
+        package_qa_add_message(messages, "dev-so", "non -dev/-dbg/nativesdk- package contains symlink .so: %s path '%s'" % \
+                 (name, package_qa_clean_path(path,d)))
+
+QAPATHTEST[dev-elf] = "package_qa_check_dev_elf"
+def package_qa_check_dev_elf(path, name, d, elf, messages):
+    """
+    Check that -dev doesn't contain real shared libraries.  The test has to
+    check that the file is not a link and is an ELF object as some recipes
+    install link-time .so files that are linker scripts.
+    """
+    if name.endswith("-dev") and path.endswith(".so") and not os.path.islink(path) and elf:
+        package_qa_add_message(messages, "dev-elf", "-dev package contains non-symlink .so: %s path '%s'" % \
+                 (name, package_qa_clean_path(path,d)))
 
 QAPATHTEST[staticdev] = "package_qa_check_staticdev"
 def package_qa_check_staticdev(path, name, d, elf, messages):
@@ -278,8 +299,8 @@
     """
 
     if not name.endswith("-pic") and not name.endswith("-staticdev") and not name.endswith("-ptest") and path.endswith(".a") and not path.endswith("_nonshared.a"):
-        messages["staticdev"] = "non -staticdev package contains static .a library: %s path '%s'" % \
-                 (name, package_qa_clean_path(path,d))
+        package_qa_add_message(messages, "staticdev", "non -staticdev package contains static .a library: %s path '%s'" % \
+                 (name, package_qa_clean_path(path,d)))
 
 def package_qa_check_libdir(d):
     """
@@ -292,10 +313,14 @@
     pkgdest = d.getVar('PKGDEST', True)
     base_libdir = d.getVar("base_libdir",True) + os.sep
     libdir = d.getVar("libdir", True) + os.sep
+    libexecdir = d.getVar("libexecdir", True) + os.sep
     exec_prefix = d.getVar("exec_prefix", True) + os.sep
 
     messages = []
 
+    # The re's are purposely fuzzy, as there are some .so.x.y.z files
+    # that don't follow the standard naming convention. The code below
+    # later verifies that matches are actual ELF files.
     lib_re = re.compile("^/lib.+\.so(\..+)?$")
     exec_re = re.compile("^%s.*/lib.+\.so(\..+)?$" % exec_prefix)
 
@@ -307,6 +332,9 @@
                 if 'libdir' in (d.getVar('INSANE_SKIP_' + package, True) or "").split():
                     bb.note("Package %s skipping libdir QA test" % (package))
                     skippackages.append(package)
+                elif d.getVar('PACKAGE_DEBUG_SPLIT_STYLE', True) == 'debug-file-directory' and package.endswith("-dbg"):
+                    bb.note("Package %s skipping libdir QA test for PACKAGE_DEBUG_SPLIT_STYLE equals debug-file-directory" % (package))
+                    skippackages.append(package)
             for package in skippackages:
                 dirs.remove(package)
         for file in files:
@@ -317,10 +345,22 @@
                 rel_path = os.sep + rel_path
                 if lib_re.match(rel_path):
                     if base_libdir not in rel_path:
-                        messages.append("%s: found library in wrong location: %s" % (package, rel_path))
+                        # make sure it's an actual ELF file
+                        elf = oe.qa.ELFFile(full_path)
+                        try:
+                            elf.open()
+                            messages.append("%s: found library in wrong location: %s" % (package, rel_path))
+                        except (oe.qa.NotELFFileError):
+                            pass
                 if exec_re.match(rel_path):
-                    if libdir not in rel_path:
-                        messages.append("%s: found library in wrong location: %s" % (package, rel_path))
+                    if libdir not in rel_path and libexecdir not in rel_path:
+                        # make sure it's an actual ELF file
+                        elf = oe.qa.ELFFile(full_path)
+                        try:
+                            elf.open()
+                            messages.append("%s: found library in wrong location: %s" % (package, rel_path))
+                        except (oe.qa.NotELFFileError):
+                            pass
 
     if messages:
         package_qa_handle_error("libdir", "\n".join(messages), d)
@@ -333,8 +373,8 @@
 
     if not "-dbg" in name and not "-ptest" in name:
         if '.debug' in path.split(os.path.sep):
-            messages["debug-files"] = "non debug package contains .debug directory: %s path %s" % \
-                     (name, package_qa_clean_path(path,d))
+            messages("debug-files", "non debug package contains .debug directory: %s path %s" % \
+                     (name, package_qa_clean_path(path,d)))
 
 QAPATHTEST[perms] = "package_qa_check_perm"
 def package_qa_check_perm(path,name,d, elf, messages):
@@ -403,7 +443,7 @@
         if bool(statinfo.st_mode & stat.S_IXUSR):
             # grep shell scripts for possible references to /exec_prefix/
             exec_prefix = d.getVar('exec_prefix', True)
-            statement = "grep -e '%s/' %s > /dev/null" % (exec_prefix, path)
+            statement = "grep -e '%s/[^ :]\{1,\}/[^ :]\{1,\}' %s > /dev/null" % (exec_prefix, path)
             if subprocess.call(statement, shell=True) == 0:
                 error_msg = pn + ": Found a reference to %s/ in %s" % (exec_prefix, path)
                 package_qa_handle_error("unsafe-references-in-scripts", error_msg, d)
@@ -465,7 +505,7 @@
 
     if target_arch == "allarch":
         pn = d.getVar('PN', True)
-        messages["arch"] = pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries"
+        package_qa_add_message(messages, "arch", pn + ": Recipe inherits the allarch class, but has packaged architecture-specific binaries")
         return
 
     # FIXME: Cross package confuse this check, so just skip them
@@ -485,15 +525,15 @@
     # Check the architecture and endianness of the binary
     if not ((machine == elf.machine()) or \
         ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
-        messages["arch"] = "Architecture did not match (%d to %d) on %s" % \
-                 (machine, elf.machine(), package_qa_clean_path(path,d))
+        package_qa_add_message(messages, "arch", "Architecture did not match (%d to %d) on %s" % \
+                 (machine, elf.machine(), package_qa_clean_path(path,d)))
     elif not ((bits == elf.abiSize()) or  \
         ((("virtual/kernel" in provides) or bb.data.inherits_class("module", d) ) and (target_os == "linux-gnux32" or target_os == "linux-gnun32"))):
-        messages["arch"] = "Bit size did not match (%d to %d) %s on %s" % \
-                 (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d))
+        package_qa_add_message(messages, "arch", "Bit size did not match (%d to %d) %s on %s" % \
+                 (bits, elf.abiSize(), bpn, package_qa_clean_path(path,d)))
     elif not littleendian == elf.isLittleEndian():
-        messages["arch"] = "Endiannes did not match (%d to %d) on %s" % \
-                 (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d))
+        package_qa_add_message(messages, "arch", "Endiannes did not match (%d to %d) on %s" % \
+                 (littleendian, elf.isLittleEndian(), package_qa_clean_path(path,d)))
 
 QAPATHTEST[desktop] = "package_qa_check_desktop"
 def package_qa_check_desktop(path, name, d, elf, messages):
@@ -505,7 +545,7 @@
         output = os.popen("%s %s" % (desktop_file_validate, path))
         # This only produces output on errors
         for l in output:
-            messages["desktop"] = "Desktop file issue: " + l.strip()
+            package_qa_add_message(messages, "desktop", "Desktop file issue: " + l.strip())
 
 QAPATHTEST[textrel] = "package_qa_textrel"
 def package_qa_textrel(path, name, d, elf, messages):
@@ -529,7 +569,7 @@
             sane = False
 
     if not sane:
-        messages["textrel"] = "ELF binary '%s' has relocations in .text" % path
+        package_qa_add_message(messages, "textrel", "ELF binary '%s' has relocations in .text" % path)
 
 QAPATHTEST[ldflags] = "package_qa_hash_style"
 def package_qa_hash_style(path, name, d, elf, messages):
@@ -564,7 +604,7 @@
             sane = True
 
     if has_syms and not sane:
-        messages["ldflags"] = "No GNU_HASH in the elf binary: '%s'" % path
+        package_qa_add_message(messages, "ldflags", "No GNU_HASH in the elf binary: '%s'" % path)
 
 
 QAPATHTEST[buildpaths] = "package_qa_check_buildpaths"
@@ -580,11 +620,15 @@
     if os.path.islink(path):
         return
 
+    # Ignore ipk and deb's CONTROL dir
+    if path.find(name + "/CONTROL/") != -1 or path.find(name + "/DEBIAN/") != -1:
+        return
+
     tmpdir = d.getVar('TMPDIR', True)
     with open(path) as f:
         file_content = f.read()
         if tmpdir in file_content:
-            messages["buildpaths"] = "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d)
+            package_qa_add_message(messages, "buildpaths", "File %s in package contained reference to tmpdir" % package_qa_clean_path(path,d))
 
 
 QAPATHTEST[xorg-driver-abi] = "package_qa_check_xorg_driver_abi"
@@ -603,7 +647,7 @@
         for rdep in bb.utils.explode_deps(d.getVar('RDEPENDS_' + name, True) or ""):
             if rdep.startswith("%sxorg-abi-" % mlprefix):
                 return
-        messages["xorg-driver-abi"] = "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path))
+        package_qa_add_message(messages, "xorg-driver-abi", "Package %s contains Xorg driver (%s) but no xorg-abi- dependencies" % (name, os.path.basename(path)))
 
 QAPATHTEST[infodir] = "package_qa_check_infodir"
 def package_qa_check_infodir(path, name, d, elf, messages):
@@ -613,7 +657,7 @@
     infodir = d.expand("${infodir}/dir")
 
     if infodir in path:
-        messages["infodir"] = "The /usr/share/info/dir file is not meant to be shipped in a particular package."
+        package_qa_add_message(messages, "infodir", "The /usr/share/info/dir file is not meant to be shipped in a particular package.")
 
 QAPATHTEST[symlink-to-sysroot] = "package_qa_check_symlink_to_sysroot"
 def package_qa_check_symlink_to_sysroot(path, name, d, elf, messages):
@@ -626,7 +670,8 @@
             tmpdir = d.getVar('TMPDIR', True)
             if target.startswith(tmpdir):
                 trimmed = path.replace(os.path.join (d.getVar("PKGDEST", True), name), "")
-                messages["symlink-to-sysroot"] = "Symlink %s in %s points to TMPDIR" % (trimmed, name)
+                package_qa_add_message(messages, "symlink-to-sysroot", "Symlink %s in %s points to TMPDIR" % (trimmed, name))
+
 def package_qa_check_license(workdir, d):
     """
     Check for changes in the license files 
@@ -639,11 +684,11 @@
     pn = d.getVar('PN', True)
 
     if lic == "CLOSED":
-        return True
+        return
 
     if not lic_files:
-        bb.error(pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)")
-        return False
+        package_qa_handle_error("license-checksum", pn + ": Recipe file does not have license file information (LIC_FILES_CHKSUM)", d)
+        return
 
     srcdir = d.getVar('S', True)
 
@@ -651,10 +696,12 @@
         try:
             (type, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
         except bb.fetch.MalformedUrl:
-            raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
+            package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM contains an invalid URL: " + url, d)
+            continue
         srclicfile = os.path.join(srcdir, path)
         if not os.path.isfile(srclicfile):
-            raise bb.build.FuncFailed( pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile)
+            package_qa_handle_error("license-checksum", pn + ": LIC_FILES_CHKSUM points to an invalid file: " + srclicfile, d)
+            continue
 
         recipemd5 = parm.get('md5', '')
         beginline, endline = 0, 0
@@ -689,8 +736,8 @@
             bb.note (pn + ": md5 checksum matched for ", url)
         else:
             if recipemd5:
-                bb.error(pn + ": md5 data is not matching for ", url)
-                bb.error(pn + ": The new md5 checksum is ", md5chksum)
+                msg = pn + ": The LIC_FILES_CHKSUM does not match for " + url
+                msg = msg + "\n" + pn + ": The new md5 checksum is " + md5chksum
                 if beginline:
                     if endline:
                         srcfiledesc = "%s (lines %d through to %d)" % (srclicfile, beginline, endline)
@@ -700,29 +747,26 @@
                     srcfiledesc = "%s (ending on line %d)" % (srclicfile, endline)
                 else:
                     srcfiledesc = srclicfile
-                bb.error(pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic))
-            else:
-                bb.error(pn + ": md5 checksum is not specified for ", url)
-                bb.error(pn + ": The md5 checksum is ", md5chksum)
-            sane = False
+                msg = msg + "\n" + pn + ": Check if the license information has changed in %s to verify that the LICENSE value \"%s\" remains valid" % (srcfiledesc, lic)
 
-    return sane
+            else:
+                msg = pn + ": LIC_FILES_CHKSUM is not specified for " +  url
+                msg = msg + "\n" + pn + ": The md5 checksum is " + md5chksum
+            package_qa_handle_error("license-checksum", msg, d)
 
 def package_qa_check_staged(path,d):
     """
-    Check staged la and pc files for sanity
-      -e.g. installed being false
+    Check staged la and pc files for common problems like references to the work
+    directory.
 
-        As this is run after every stage we should be able
-        to find the one responsible for the errors easily even
-        if we look at every .pc and .la file
+    As this is run after every stage we should be able to find the one
+    responsible for the errors easily even if we look at every .pc and .la file.
     """
 
     sane = True
     tmpdir = d.getVar('TMPDIR', True)
     workdir = os.path.join(tmpdir, "work")
 
-    installed = "installed=yes"
     if bb.data.inherits_class("native", d) or bb.data.inherits_class("cross", d):
         pkgconfigcheck = workdir
     else:
@@ -750,7 +794,7 @@
     return sane
 
 # Walk over all files in a directory and call func
-def package_qa_walk(path, warnfuncs, errorfuncs, skip, package, d):
+def package_qa_walk(warnfuncs, errorfuncs, skip, package, d):
     import oe.qa
 
     #if this will throw an exception, then fix the dict above
@@ -763,7 +807,8 @@
             elf = oe.qa.ELFFile(path)
             try:
                 elf.open()
-            except:
+            except (IOError, oe.qa.NotELFFileError):
+                # IOError can happen if the packaging control files disappear,
                 elf = None
             for func in warnfuncs:
                 func(path, package, d, elf, warnings)
@@ -775,15 +820,12 @@
     for e in errors:
         package_qa_handle_error(e, errors[e], d)
 
-    return len(errors) == 0
-
 def package_qa_check_rdepends(pkg, pkgdest, skip, taskdeps, packages, d):
     # Don't do this check for kernel/module recipes, there aren't too many debug/development
     # packages and you can get false positives e.g. on kernel-module-lirc-dev
     if bb.data.inherits_class("kernel", d) or bb.data.inherits_class("module-base", d):
-        return True
+        return
 
-    sane = True
     if not "-dbg" in pkg and not "packagegroup-" in pkg and not "-image" in pkg:
         localdata = bb.data.createCopy(d)
         localdata.setVar('OVERRIDES', pkg)
@@ -797,10 +839,10 @@
             for rdepend in rdepends:
                 if "-dbg" in rdepend and "debug-deps" not in skip:
                     error_msg = "%s rdepends on %s" % (pkg,rdepend)
-                    sane = package_qa_handle_error("debug-deps", error_msg, d)
+                    package_qa_handle_error("debug-deps", error_msg, d)
                 if (not "-dev" in pkg and not "-staticdev" in pkg) and rdepend.endswith("-dev") and "dev-deps" not in skip:
                     error_msg = "%s rdepends on %s" % (pkg, rdepend)
-                    sane = package_qa_handle_error("dev-deps", error_msg, d)
+                    package_qa_handle_error("dev-deps", error_msg, d)
                 if rdepend not in packages:
                     rdep_data = oe.packagedata.read_subpkgdata(rdepend, d)
                     if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
@@ -817,8 +859,11 @@
                                 break
                     if rdep_data and 'PN' in rdep_data and rdep_data['PN'] in taskdeps:
                         continue
-                    error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
-                    sane = package_qa_handle_error("build-deps", error_msg, d)
+                    if rdep_data and 'PN' in rdep_data:
+                        error_msg = "%s rdepends on %s, but it isn't a build dependency, missing %s in DEPENDS or PACKAGECONFIG?" % (pkg, rdepend, rdep_data['PN'])
+                    else:
+                        error_msg = "%s rdepends on %s, but it isn't a build dependency?" % (pkg, rdepend)
+                    package_qa_handle_error("build-deps", error_msg, d)
 
         if "file-rdeps" not in skip:
             ignored_file_rdeps = set(['/bin/sh', '/usr/bin/env', 'rtld(GNU_HASH)'])
@@ -883,21 +928,17 @@
                         break
             if filerdepends:
                 for key in filerdepends:
-                    error_msg = "%s contained in package %s requires %s, but no providers found in its RDEPENDS" % \
-                            (filerdepends[key],pkg, key)
-                sane = package_qa_handle_error("file-rdeps", error_msg, d)
-
-    return sane
+                    error_msg = "%s contained in package %s requires %s, but no providers found in RDEPENDS_%s?" % \
+                            (filerdepends[key].replace("_%s" % pkg, "").replace("@underscore@", "_"), pkg, key, pkg)
+                package_qa_handle_error("file-rdeps", error_msg, d)
 
 def package_qa_check_deps(pkg, pkgdest, skip, d):
-    sane = True
 
     localdata = bb.data.createCopy(d)
     localdata.setVar('OVERRIDES', pkg)
     bb.data.update_data(localdata)
 
     def check_valid_deps(var):
-        sane = True
         try:
             rvar = bb.utils.explode_dep_versions2(localdata.getVar(var, True) or "")
         except ValueError as e:
@@ -906,24 +947,14 @@
             for v in rvar[dep]:
                 if v and not v.startswith(('< ', '= ', '> ', '<= ', '>=')):
                     error_msg = "%s_%s is invalid: %s (%s)   only comparisons <, =, >, <=, and >= are allowed" % (var, pkg, dep, v)
-                    sane = package_qa_handle_error("dep-cmp", error_msg, d)
-        return sane
+                    package_qa_handle_error("dep-cmp", error_msg, d)
 
-    sane = True
-    if not check_valid_deps('RDEPENDS'):
-        sane = False
-    if not check_valid_deps('RRECOMMENDS'):
-        sane = False
-    if not check_valid_deps('RSUGGESTS'):
-        sane = False
-    if not check_valid_deps('RPROVIDES'):
-        sane = False
-    if not check_valid_deps('RREPLACES'):
-        sane = False
-    if not check_valid_deps('RCONFLICTS'):
-        sane = False
-
-    return sane
+    check_valid_deps('RDEPENDS')
+    check_valid_deps('RRECOMMENDS')
+    check_valid_deps('RSUGGESTS')
+    check_valid_deps('RPROVIDES')
+    check_valid_deps('RREPLACES')
+    check_valid_deps('RCONFLICTS')
 
 QAPATHTEST[expanded-d] = "package_qa_check_expanded_d"
 def package_qa_check_expanded_d(path,name,d,elf,messages):
@@ -945,10 +976,10 @@
                 # Bitbake expands ${D} within bbvar during the previous step, so we check for its expanded value
                 if expanded_d in bbvar:
                     if var == 'FILES':
-                        messages["expanded-d"] = "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak
+                        package_qa_add_message(messages, "expanded-d", "FILES in %s recipe should not contain the ${D} variable as it references the local build directory not the target filesystem, best solution is to remove the ${D} reference" % pak)
                         sane = False
                     else:
-                        messages["expanded-d"] = "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak)
+                        package_qa_add_message(messages, "expanded-d", "%s in %s recipe contains ${D}, it should be replaced by $D instead" % (var, pak))
                         sane = False
     return sane
 
@@ -996,12 +1027,12 @@
         rootfs_path = path[len(dest):]
         check_uid = int(d.getVar('HOST_USER_UID', True))
         if stat.st_uid == check_uid:
-            messages["host-user-contaminated"] = "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid)
+            package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by uid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_uid))
             return False
 
         check_gid = int(d.getVar('HOST_USER_GID', True))
         if stat.st_gid == check_gid:
-            messages["host-user-contaminated"] = "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid)
+            package_qa_add_message(messages, "host-user-contaminated", "%s: %s is owned by gid %d, which is the same as the user running bitbake. This may be due to host contamination" % (pn, rootfs_path, check_gid))
             return False
     return True
 
@@ -1068,9 +1099,6 @@
         taskdeps.add(taskdepdata[dep][0])
 
     g = globals()
-    walk_sane = True
-    rdepends_sane = True
-    deps_sane = True
     for package in packages:
         skip = (d.getVar('INSANE_SKIP_' + package, True) or "").split()
         if skip:
@@ -1081,12 +1109,17 @@
                continue
             if w in testmatrix and testmatrix[w] in g:
                 warnchecks.append(g[testmatrix[w]])
+            if w == 'unsafe-references-in-binaries':
+                oe.utils.write_ld_so_conf(d)
+
         errorchecks = []
         for e in (d.getVar("ERROR_QA", True) or "").split():
             if e in skip:
                continue
             if e in testmatrix and testmatrix[e] in g:
                 errorchecks.append(g[testmatrix[e]])
+            if e == 'unsafe-references-in-binaries':
+                oe.utils.write_ld_so_conf(d)
 
         bb.note("Checking Package: %s" % package)
         # Check package name
@@ -1095,23 +1128,21 @@
                     "%s doesn't match the [a-z0-9.+-]+ regex" % package, d)
 
         path = "%s/%s" % (pkgdest, package)
-        if not package_qa_walk(path, warnchecks, errorchecks, skip, package, d):
-            walk_sane  = False
-        if not package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d):
-            rdepends_sane = False
-        if not package_qa_check_deps(package, pkgdest, skip, d):
-            deps_sane = False
+        package_qa_walk(warnchecks, errorchecks, skip, package, d)
 
+        package_qa_check_rdepends(package, pkgdest, skip, taskdeps, packages, d)
+        package_qa_check_deps(package, pkgdest, skip, d)
 
     if 'libdir' in d.getVar("ALL_QA", True).split():
         package_qa_check_libdir(d)
 
     qa_sane = d.getVar("QA_SANE", True)
-    if not walk_sane or not rdepends_sane or not deps_sane or not qa_sane:
+    if not qa_sane:
         bb.fatal("QA run found fatal errors. Please consider fixing them.")
     bb.note("DONE with PACKAGE QA")
 }
 
+do_package_qa[vardepsexclude] = "BB_TASKDEPDATA"
 do_package_qa[rdeptask] = "do_packagedata"
 addtask do_package_qa after do_packagedata do_package before do_build
 
@@ -1126,7 +1157,7 @@
 python do_qa_staging() {
     bb.note("QA checking staging")
 
-    if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${STAGING_LIBDIR}'), d):
+    if not package_qa_check_staged(d.expand('${SYSROOT_DESTDIR}${libdir}'), d):
         bb.fatal("QA staging was broken by the package built above")
 }
 
@@ -1139,19 +1170,21 @@
 
     configs = []
     workdir = d.getVar('WORKDIR', True)
-    bb.note("Checking autotools environment for common misconfiguration")
-    for root, dirs, files in os.walk(workdir):
-        statement = "grep -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s > /dev/null" % \
-                    os.path.join(root,"config.log")
-        if "config.log" in files:
-            if subprocess.call(statement, shell=True) == 0:
-                bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
+
+    if bb.data.inherits_class('autotools', d):
+        bb.note("Checking autotools environment for common misconfiguration")
+        for root, dirs, files in os.walk(workdir):
+            statement = "grep -q -F -e 'CROSS COMPILE Badness:' -e 'is unsafe for cross-compilation' %s" % \
+                        os.path.join(root,"config.log")
+            if "config.log" in files:
+                if subprocess.call(statement, shell=True) == 0:
+                    bb.fatal("""This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities.
 Rerun configure task after fixing this.""")
 
-        if "configure.ac" in files:
-            configs.append(os.path.join(root,"configure.ac"))
-        if "configure.in" in files:
-            configs.append(os.path.join(root, "configure.in"))
+            if "configure.ac" in files:
+                configs.append(os.path.join(root,"configure.ac"))
+            if "configure.in" in files:
+                configs.append(os.path.join(root, "configure.in"))
 
     ###########################################################################
     # Check gettext configuration and dependencies are correct
@@ -1178,8 +1211,7 @@
     # Check license variables
     ###########################################################################
 
-    if not package_qa_check_license(workdir, d):
-        bb.fatal("Licensing Error: LIC_FILES_CHKSUM does not match, please fix")
+    package_qa_check_license(workdir, d)
 
     ###########################################################################
     # Check unrecognised configure options (with a white list)
@@ -1211,6 +1243,10 @@
                 pn = d.getVar('PN', True)
                 error_msg = "%s: invalid PACKAGECONFIG: %s" % (pn, pconfig)
                 package_qa_handle_error("invalid-packageconfig", error_msg, d)
+
+    qa_sane = d.getVar("QA_SANE", True)
+    if not qa_sane:
+        bb.fatal("Fatal QA errors found, failing task.")
 }
 
 python do_qa_unpack() {
@@ -1255,9 +1291,6 @@
         msg += "%s\n" % extrapaths
         bb.warn(msg)
 
-    if d.getVar('do_stage', True) is not None:
-        bb.fatal("Legacy staging found for %s as it has a do_stage function. This will need conversion to a do_install or often simply removal to work with OE-core" % d.getVar("FILE", True))
-
     overrides = d.getVar('OVERRIDES', True).split(':')
     pn = d.getVar('PN', True)
     if pn in overrides:
@@ -1280,4 +1313,7 @@
         d.setVarFlag('do_package_qa', 'rdeptask', '')
     for i in issues:
         package_qa_handle_error("pkgvarcheck", "%s: Variable %s is set as not being package specific, please fix this." % (d.getVar("FILE", True), i), d)
+    qa_sane = d.getVar("QA_SANE", True)
+    if not qa_sane:
+        bb.fatal("Fatal QA errors found, failing task.")
 }
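# --- Editor's note (illustrative sketch, not part of the patch) ---
# The insane.bbclass hunks above replace per-check boolean returns and direct
# messages[...] assignments with package_qa_add_message()/package_qa_handle_error(),
# deferring the fatal decision to a single QA_SANE test at the end of the task.
# Below is a minimal, hypothetical Python sketch of that aggregation pattern;
# the names, the "error_qa" set and the plain-dict "state" are simplified
# stand-ins, not the real bbclass API.

def add_message(messages, section, new_msg):
    # repeated findings for one test are concatenated instead of overwritten
    if section in messages:
        messages[section] = messages[section] + "\n" + new_msg
    else:
        messages[section] = new_msg

def handle_error(error_class, error, state):
    if error_class in state["error_qa"]:
        print("ERROR: QA Issue: %s [%s]" % (error, error_class))
        state["sane"] = False          # remembered; the task only fails once, at the end
    else:
        print("WARNING: QA Issue: %s [%s]" % (error, error_class))

state = {"error_qa": {"ldflags", "license-checksum"}, "sane": True}
messages = {}
add_message(messages, "ldflags", "No GNU_HASH in the elf binary: '/usr/bin/foo'")
add_message(messages, "ldflags", "No GNU_HASH in the elf binary: '/usr/bin/bar'")
for section, msg in messages.items():
    handle_error(section, msg, state)
if not state["sane"]:
    raise SystemExit("QA run found fatal errors. Please consider fixing them.")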
diff --git a/yocto-poky/meta/classes/kernel-arch.bbclass b/yocto-poky/meta/classes/kernel-arch.bbclass
index d8b180e..3ed5986 100644
--- a/yocto-poky/meta/classes/kernel-arch.bbclass
+++ b/yocto-poky/meta/classes/kernel-arch.bbclass
@@ -21,9 +21,7 @@
 
     valid_archs = d.getVar('valid_archs', True).split()
 
-    if   re.match('i.86$', a):                  return 'i386'
-    elif re.match('x86.64$', a):                return 'x86_64'
-    elif re.match('athlon$', a):                return 'x86'
+    if   re.match('(i.86|athlon|x86.64)$', a):  return 'x86'
     elif re.match('armeb$', a):                 return 'arm'
     elif re.match('aarch64$', a):               return 'arm64'
     elif re.match('aarch64_be$', a):            return 'arm64'
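# --- Editor's note (illustrative sketch, not part of the patch) ---
# The kernel-arch.bbclass hunk above folds the i?86/athlon/x86_64 cases into a
# single "x86" mapping. A quick standalone exercise of that regex in plain
# Python, outside bitbake; the function below is a simplified stand-in for
# map_kernel_arch, not the bbclass code.

import re

def map_to_kernel_arch(a):
    if re.match('(i.86|athlon|x86.64)$', a):
        return 'x86'
    if re.match('armeb$', a):
        return 'arm'
    if re.match('(aarch64|aarch64_be)$', a):
        return 'arm64'
    return a

for arch in ("i586", "i686", "athlon", "x86_64", "aarch64", "mips"):
    print(arch, "->", map_to_kernel_arch(arch))
# i586/i686/athlon/x86_64 all print "x86"; aarch64 prints "arm64"; mips is unchanged.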
diff --git a/yocto-poky/meta/classes/kernel-fitimage.bbclass b/yocto-poky/meta/classes/kernel-fitimage.bbclass
index 2a56a54..e5b75ed 100644
--- a/yocto-poky/meta/classes/kernel-fitimage.bbclass
+++ b/yocto-poky/meta/classes/kernel-fitimage.bbclass
@@ -14,7 +14,7 @@
 
         image = d.getVar('INITRAMFS_IMAGE', True)
         if image:
-            d.appendVarFlag('do_assemble_fitimage', 'depends', ' ${INITRAMFS_IMAGE}:do_rootfs')
+            d.appendVarFlag('do_assemble_fitimage', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
 }
 
 #
@@ -215,6 +215,7 @@
 
 addtask assemble_fitimage before do_install after do_compile
 
+kernel_do_deploy[vardepsexclude] = "DATETIME"
 kernel_do_deploy_append() {
 	# Update deploy directory
 	if test "x${KERNEL_IMAGETYPE}" = "xfitImage" ; then
diff --git a/yocto-poky/meta/classes/kernel-yocto.bbclass b/yocto-poky/meta/classes/kernel-yocto.bbclass
index c2d0d30..f86b3ef 100644
--- a/yocto-poky/meta/classes/kernel-yocto.bbclass
+++ b/yocto-poky/meta/classes/kernel-yocto.bbclass
@@ -1,5 +1,5 @@
 # remove tasks that modify the source tree in case externalsrc is inherited
-SRCTREECOVEREDTASKS += "do_kernel_link_vmlinux do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_shared_workdir do_fetch do_unpack do_patch"
+SRCTREECOVEREDTASKS += "do_kernel_configme do_validate_branches do_kernel_configcheck do_kernel_checkout do_shared_workdir do_fetch do_unpack do_patch"
 
 # returns local (absolute) path names for all valid patches in the
 # src_uri
@@ -170,6 +170,17 @@
 		fi
 	fi
 
+        current_branch=`git rev-parse --abbrev-ref HEAD`
+        machine_branch="${@ get_machine_branch(d, "${KBRANCH}" )}"
+        if [ "${current_branch}" != "${machine_branch}" ]; then
+            bbwarn "After meta data application, the kernel tree branch is ${current_branch}. The"
+            bbwarn "SRC_URI specified branch ${machine_branch}. The branch will be forced to ${machine_branch},"
+            bbwarn "but this means the board meta data (.scc files) do not match the SRC_URI specification."
+            bbwarn "The meta data and branch ${machine_branch} should be inspected to ensure the proper"
+            bbwarn "kernel is being built."
+            git checkout -f ${machine_branch}
+        fi
+
 	if [ "${machine_srcrev}" != "AUTOINC" ]; then
 		if ! [ "$(git rev-parse --verify ${machine_srcrev}~0)" = "$(git merge-base ${machine_srcrev} HEAD)" ]; then
 			bberror "SRCREV ${machine_srcrev} was specified, but is not reachable"
@@ -355,18 +366,6 @@
 	fi
 }
 
-# Many scripts want to look in arch/$arch/boot for the bootable
-# image. This poses a problem for vmlinux based booting. This 
-# task arranges to have vmlinux appear in the normalized directory
-# location.
-do_kernel_link_vmlinux() {
-	if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
-		mkdir ${B}/arch/${ARCH}/boot
-	fi
-	cd ${B}/arch/${ARCH}/boot
-	ln -sf ../../../vmlinux
-}
-
 OE_TERMINAL_EXPORTS += "KBUILD_OUTPUT"
 KBUILD_OUTPUT = "${B}"
 
diff --git a/yocto-poky/meta/classes/kernel.bbclass b/yocto-poky/meta/classes/kernel.bbclass
index ee3e9a0..6e3e81e 100644
--- a/yocto-poky/meta/classes/kernel.bbclass
+++ b/yocto-poky/meta/classes/kernel.bbclass
@@ -25,7 +25,7 @@
 
     image = d.getVar('INITRAMFS_IMAGE', True)
     if image:
-        d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_rootfs')
+        d.appendVarFlag('do_bundle_initramfs', 'depends', ' ${INITRAMFS_IMAGE}:do_image_complete')
 
     # NOTE: setting INITRAMFS_TASK is for backward compatibility
     #       The preferred method is to set INITRAMFS_IMAGE, because
@@ -217,6 +217,14 @@
 	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS MACHINE
 	if (grep -q -i -e '^CONFIG_MODULES=y$' ${B}/.config); then
 		oe_runmake -C ${B} ${PARALLEL_MAKE} modules CC="${KERNEL_CC}" LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS}
+
+		# Module.symvers gets updated during the 
+		# building of the kernel modules. We need to
+		# update this in the shared workdir since some
+		# external kernel modules have a dependency on
+		# other kernel modules and will look at this
+		# file to do symbol lookups
+		cp Module.symvers ${STAGING_KERNEL_BUILDDIR}/
 	else
 		bbnote "no modules to compile"
 	fi
@@ -309,18 +317,9 @@
 		cp -fR include/generated/* $kerneldir/include/generated/
 	fi
 
-	# When ARCH is set to i386 or x86_64, we need to map ARCH to the real name of src
-	# dir (x86) under arch/ of kenrel tree, so that we can find correct source to copy.
-
-	if [ "${ARCH}" = "i386" ] || [ "${ARCH}" = "x86_64" ]; then
-		KERNEL_SRCARCH=x86
-	else
-		KERNEL_SRCARCH=${ARCH}
-	fi
-
-	if [ -d arch/${KERNEL_SRCARCH}/include/generated ]; then
-		mkdir -p $kerneldir/arch/${KERNEL_SRCARCH}/include/generated/
-		cp -fR arch/${KERNEL_SRCARCH}/include/generated/* $kerneldir/arch/${KERNEL_SRCARCH}/include/generated/
+	if [ -d arch/${ARCH}/include/generated ]; then
+		mkdir -p $kerneldir/arch/${ARCH}/include/generated/
+		cp -fR arch/${ARCH}/include/generated/* $kerneldir/arch/${ARCH}/include/generated/
 	fi
 }
 
@@ -347,7 +346,8 @@
 	if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
 		cp "${WORKDIR}/defconfig" "${B}/.config"
 	fi
-	eval ${KERNEL_CONFIG_COMMAND}
+
+	${KERNEL_CONFIG_COMMAND}
 }
 
 do_savedefconfig() {
@@ -405,7 +405,19 @@
 PACKAGESPLITFUNCS_prepend = "split_kernel_packages "
 
 python split_kernel_packages () {
-    do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+    do_split_packages(d, root='/lib/firmware', file_regex='^(.*)\.(bin|fw|cis|csp|dsp)$', output_pattern='kernel-firmware-%s', description='Firmware for %s', recursive=True, extra_depends='')
+}
+
+# Many scripts want to look in arch/$arch/boot for the bootable
+# image. This poses a problem for vmlinux based booting. This 
+# task arranges to have vmlinux appear in the normalized directory
+# location.
+do_kernel_link_vmlinux() {
+	if [ ! -d "${B}/arch/${ARCH}/boot" ]; then
+		mkdir ${B}/arch/${ARCH}/boot
+	fi
+	cd ${B}/arch/${ARCH}/boot
+	ln -sf ../../../vmlinux
 }
 
 do_strip() {
@@ -490,6 +502,7 @@
 		ln -sf ${initramfs_base_name}.bin ${initramfs_symlink_name}.bin
 	fi
 }
+do_deploy[cleandirs] = "${DEPLOYDIR}"
 do_deploy[dirs] = "${DEPLOYDIR} ${B}"
 do_deploy[prefuncs] += "package_get_auto_pr"
 
diff --git a/yocto-poky/meta/classes/libc-package.bbclass b/yocto-poky/meta/classes/libc-package.bbclass
index adb4230..467d567 100644
--- a/yocto-poky/meta/classes/libc-package.bbclass
+++ b/yocto-poky/meta/classes/libc-package.bbclass
@@ -332,6 +332,8 @@
         bb.build.exec_func("do_prep_locale_tree", d)
 
     utf8_only = int(d.getVar('LOCALE_UTF8_ONLY', True) or 0)
+    utf8_is_default = int(d.getVar('LOCALE_UTF8_IS_DEFAULT', True) or 0)
+
     encodings = {}
     for locale in to_generate:
         charset = supported[locale]
@@ -344,10 +346,11 @@
         else:
             base = locale
 
-        # Precompiled locales are kept as is, obeying SUPPORTED, while
-        # others are adjusted, ensuring that the non-suffixed locales
-        # are utf-8, while the suffixed are not.
-        if use_bin == "precompiled":
+        # Non-precompiled locales may be renamed so that the default
+        # (non-suffixed) encoding is always UTF-8, i.e., instead of en_US and
+        # en_US.UTF-8, we have en_US and en_US.ISO-8859-1. This implicitly
+        # contradicts SUPPORTED.
+        if use_bin == "precompiled" or not utf8_is_default:
             output_locale(locale, base, charset)
         else:
             if charset == 'UTF-8':
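# --- Editor's note (illustrative sketch, not part of the patch) ---
# The libc-package.bbclass hunk above gates the old locale renaming behind
# LOCALE_UTF8_IS_DEFAULT: precompiled locales, or builds with the variable
# unset/0, keep the names exactly as listed in SUPPORTED, while the old
# behaviour reserves the non-suffixed name for the UTF-8 variant. A small
# hypothetical sketch of that naming decision (simplified stand-ins, not the
# real output_locale() machinery):

def output_name(locale, base, charset, use_bin, utf8_is_default):
    if use_bin == "precompiled" or not utf8_is_default:
        return locale                       # keep the SUPPORTED name as-is
    if charset == 'UTF-8':
        return base                         # e.g. en_US.UTF-8 -> en_US
    return '%s.%s' % (base, charset)        # e.g. en_US -> en_US.ISO-8859-1

print(output_name('en_US.UTF-8', 'en_US', 'UTF-8', 'compile', True))    # en_US
print(output_name('en_US', 'en_US', 'ISO-8859-1', 'compile', True))     # en_US.ISO-8859-1
print(output_name('en_US', 'en_US', 'ISO-8859-1', 'compile', False))    # en_US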
diff --git a/yocto-poky/meta/classes/license.bbclass b/yocto-poky/meta/classes/license.bbclass
index 8ad4614..43944e6 100644
--- a/yocto-poky/meta/classes/license.bbclass
+++ b/yocto-poky/meta/classes/license.bbclass
@@ -21,25 +21,28 @@
     license_image_dir = d.expand('${LICENSE_DIRECTORY}/${IMAGE_NAME}')
     bb.utils.mkdirhier(license_image_dir)
     from oe.rootfs import image_list_installed_packages
+    from oe.utils import format_pkg_list
+
+    pkgs = image_list_installed_packages(d)
+    output = format_pkg_list(pkgs)
     open(os.path.join(license_image_dir, 'package.manifest'),
-        'w+').write(image_list_installed_packages(d))
+        'w+').write(output)
+}
+
+python write_deploy_manifest() {
+    license_deployed_manifest(d)
 }
 
 python license_create_manifest() {
-    import re
     import oe.packagedata
     from oe.rootfs import image_list_installed_packages
 
-    bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE", True) or "").split()
-    bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
-    bad_licenses = expand_wildcard_licenses(d, bad_licenses)
-
     build_images_from_feeds = d.getVar('BUILD_IMAGES_FROM_FEEDS', True)
     if build_images_from_feeds == "1":
         return 0
 
     pkg_dic = {}
-    for pkg in image_list_installed_packages(d).splitlines():
+    for pkg in sorted(image_list_installed_packages(d)):
         pkg_info = os.path.join(d.getVar('PKGDATA_DIR', True),
                                 'runtime-reverse', pkg)
         pkg_name = os.path.basename(os.readlink(pkg_info))
@@ -49,8 +52,18 @@
             pkg_lic_name = "LICENSE_" + pkg_name
             pkg_dic[pkg_name]["LICENSE"] = pkg_dic[pkg_name][pkg_lic_name]
 
-    license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+    rootfs_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
                         d.getVar('IMAGE_NAME', True), 'license.manifest')
+    write_license_files(d, rootfs_license_manifest, pkg_dic)
+}
+
+def write_license_files(d, license_manifest, pkg_dic):
+    import re
+
+    bad_licenses = (d.getVar("INCOMPATIBLE_LICENSE", True) or "").split()
+    bad_licenses = map(lambda l: canonical_license(d, l), bad_licenses)
+    bad_licenses = expand_wildcard_licenses(d, bad_licenses)
+
     with open(license_manifest, "w") as license_file:
         for pkg in sorted(pkg_dic):
             if bad_licenses:
@@ -61,20 +74,28 @@
                 except oe.license.LicenseError as exc:
                     bb.fatal('%s: %s' % (d.getVar('P', True), exc))
             else:
-                pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', '', pkg_dic[pkg]["LICENSE"])
+                pkg_dic[pkg]["LICENSES"] = re.sub('[|&()*]', ' ', pkg_dic[pkg]["LICENSE"])
                 pkg_dic[pkg]["LICENSES"] = re.sub('  *', ' ', pkg_dic[pkg]["LICENSES"])
                 pkg_dic[pkg]["LICENSES"] = pkg_dic[pkg]["LICENSES"].split()
 
-            license_file.write("PACKAGE NAME: %s\n" % pkg)
-            license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
-            license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
-            license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
+            if not "IMAGE_MANIFEST" in pkg_dic[pkg]:
+                # Rootfs manifest
+                license_file.write("PACKAGE NAME: %s\n" % pkg)
+                license_file.write("PACKAGE VERSION: %s\n" % pkg_dic[pkg]["PV"])
+                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+                license_file.write("LICENSE: %s\n\n" % pkg_dic[pkg]["LICENSE"])
 
-            # If the package doesn't contain any file, that is, its size is 0, the license
-            # isn't relevant as far as the final image is concerned. So doing license check
-            # doesn't make much sense, skip it.
-            if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
-                continue
+                # If the package doesn't contain any file, that is, its size is 0, the license
+                # isn't relevant as far as the final image is concerned. So doing license check
+                # doesn't make much sense, skip it.
+                if pkg_dic[pkg]["PKGSIZE_%s" % pkg] == "0":
+                    continue
+            else:
+                # Image manifest
+                license_file.write("RECIPE NAME: %s\n" % pkg_dic[pkg]["PN"])
+                license_file.write("VERSION: %s\n" % pkg_dic[pkg]["PV"])
+                license_file.write("LICENSE: %s\n" % pkg_dic[pkg]["LICENSE"])
+                license_file.write("FILES: %s\n\n" % pkg_dic[pkg]["FILES"])
 
             for lic in pkg_dic[pkg]["LICENSES"]:
                 lic_file = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
@@ -98,15 +119,16 @@
     if copy_lic_manifest == "1":
         rootfs_license_dir = os.path.join(d.getVar('IMAGE_ROOTFS', 'True'), 
                                 'usr', 'share', 'common-licenses')
-        os.makedirs(rootfs_license_dir)
+        bb.utils.mkdirhier(rootfs_license_dir)
         rootfs_license_manifest = os.path.join(rootfs_license_dir,
-                                                'license.manifest')
-        os.link(license_manifest, rootfs_license_manifest)
+                os.path.split(license_manifest)[1])
+        if not os.path.exists(rootfs_license_manifest):
+            os.link(license_manifest, rootfs_license_manifest)
 
         if copy_lic_dirs == "1":
             for pkg in sorted(pkg_dic):
                 pkg_rootfs_license_dir = os.path.join(rootfs_license_dir, pkg)
-                os.makedirs(pkg_rootfs_license_dir)
+                bb.utils.mkdirhier(pkg_rootfs_license_dir)
                 pkg_license_dir = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
                                             pkg_dic[pkg]["PN"]) 
                 licenses = os.listdir(pkg_license_dir)
@@ -124,14 +146,145 @@
                         if not os.path.exists(rootfs_license):
                             os.link(pkg_license, rootfs_license)
 
-                        os.symlink(os.path.join('..', lic), pkg_rootfs_license)
+                        if not os.path.exists(pkg_rootfs_license):
+                            os.symlink(os.path.join('..', lic), pkg_rootfs_license)
                     else:
-                        if oe.license.license_ok(canonical_license(d,
-                            lic), bad_licenses) == False:
+                        if (oe.license.license_ok(canonical_license(d,
+                                lic), bad_licenses) == False or
+                                os.path.exists(pkg_rootfs_license)):
                             continue
 
                         os.link(pkg_license, pkg_rootfs_license)
-}
+
+
+def license_deployed_manifest(d):
+    """
+    Write the license manifest for the deployed recipes.
+    The deployed recipes usually include the bootloader
+    and extra files to boot the target.
+    """
+
+    dep_dic = {}
+    man_dic = {}
+    lic_dir = d.getVar("LICENSE_DIRECTORY", True)
+
+    dep_dic = get_deployed_dependencies(d)
+    for dep in dep_dic.keys():
+        man_dic[dep] = {}
+        # It is necessary to mark that this will be used for the image manifest
+        man_dic[dep]["IMAGE_MANIFEST"] = True
+        man_dic[dep]["PN"] = dep
+        man_dic[dep]["FILES"] = \
+            " ".join(get_deployed_files(dep_dic[dep]))
+        with open(os.path.join(lic_dir, dep, "recipeinfo"), "r") as f:
+            for line in f.readlines():
+                key,val = line.split(": ", 1)
+                man_dic[dep][key] = val[:-1]
+
+    image_license_manifest = os.path.join(d.getVar('LICENSE_DIRECTORY', True),
+            d.getVar('IMAGE_NAME', True), 'image_license.manifest')
+    write_license_files(d, image_license_manifest, man_dic)
+
+def get_deployed_dependencies(d):
+    """
+    Get all the deployed dependencies of an image
+    """
+
+    deploy = {}
+    # Get all the dependencies for the current task (rootfs).
+    # Also get EXTRA_IMAGEDEPENDS because the bootloader is
+    # usually in this var and not listed in rootfs.
+    # Finally, get the dependencies from the boot classes because
+    # they might contain the bootloader.
+    taskdata = d.getVar("BB_TASKDEPDATA", False)
+    depends = list(set([dep[0] for dep
+                    in taskdata.itervalues()
+                    if not dep[0].endswith("-native")]))
+    extra_depends = d.getVar("EXTRA_IMAGEDEPENDS", True)
+    boot_depends = get_boot_dependencies(d)
+    depends.extend(extra_depends.split())
+    depends.extend(boot_depends)
+    depends = list(set(depends))
+
+    # To verify what was deployed, check the rootfs dependencies against
+    # the SSTATE_MANIFESTS for the "deploy" task.
+    # The manifest file name contains the arch. Because we are not running
+    # in the recipe context it is necessary to check every arch used.
+    sstate_manifest_dir = d.getVar("SSTATE_MANIFESTS", True)
+    sstate_archs = d.getVar("SSTATE_ARCHS", True)
+    extra_archs = d.getVar("PACKAGE_EXTRA_ARCHS", True)
+    archs = list(set(("%s %s" % (sstate_archs, extra_archs)).split()))
+    for dep in depends:
+        # Some recipes have an arch on their own, so we try that first.
+        special_arch = d.getVar("PACKAGE_ARCH_pn-%s" % dep, True)
+        if special_arch:
+            sstate_manifest_file = os.path.join(sstate_manifest_dir,
+                    "manifest-%s-%s.deploy" % (special_arch, dep))
+            if os.path.exists(sstate_manifest_file):
+                deploy[dep] = sstate_manifest_file
+                continue
+
+        for arch in archs:
+            sstate_manifest_file = os.path.join(sstate_manifest_dir,
+                    "manifest-%s-%s.deploy" % (arch, dep))
+            if os.path.exists(sstate_manifest_file):
+                deploy[dep] = sstate_manifest_file
+                break
+
+    return deploy
+get_deployed_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+
+def get_boot_dependencies(d):
+    """
+    Return the dependencies from boot tasks
+    """
+
+    depends = []
+    boot_depends_string = ""
+    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
+    # Only bootimg and bootdirectdisk include the depends flag
+    boot_tasks = ["do_bootimg", "do_bootdirectdisk",]
+
+    for task in boot_tasks:
+        boot_depends_string = "%s %s" % (boot_depends_string,
+                d.getVarFlag(task, "depends", True) or "")
+    boot_depends = [dep.split(":")[0] for dep
+                in boot_depends_string.split()
+                if not dep.split(":")[0].endswith("-native")]
+    for dep in boot_depends:
+        info_file = os.path.join(d.getVar("LICENSE_DIRECTORY", True),
+                dep, "recipeinfo")
+        # If the recipe and dependency names are the same
+        if os.path.exists(info_file):
+            depends.append(dep)
+        # We need to search for the provider of the dependency
+        else:
+            for taskdep in taskdepdata.itervalues():
+                # The fifth field contains what the task provides
+                if dep in taskdep[4]:
+                    info_file = os.path.join(
+                            d.getVar("LICENSE_DIRECTORY", True),
+                            taskdep[0], "recipeinfo")
+                    if os.path.exists(info_file):
+                        depends.append(taskdep[0])
+                        break
+    return depends
+get_boot_dependencies[vardepsexclude] = "BB_TASKDEPDATA"
+
+def get_deployed_files(man_file):
+    """
+    Get the files deployed from the sstate manifest
+    """
+
+    dep_files = []
+    excluded_files = ["README_-_DO_NOT_DELETE_FILES_IN_THIS_DIRECTORY.txt"]
+    with open(man_file, "r") as manifest:
+        all_files = manifest.read()
+    for f in all_files.splitlines():
+        if ((not (os.path.islink(f) or os.path.isdir(f))) and
+                not os.path.basename(f) in excluded_files):
+            dep_files.append(os.path.basename(f))
+    return dep_files
 
 python do_populate_lic() {
     """
@@ -142,6 +295,10 @@
     # The base directory we wrangle licenses to
     destdir = os.path.join(d.getVar('LICSSTATEDIR', True), d.getVar('PN', True))
     copy_license_files(lic_files_paths, destdir)
+    info = get_recipe_info(d)
+    with open(os.path.join(destdir, "recipeinfo"), "w") as f:
+        for key in sorted(info.keys()):
+            f.write("%s: %s\n" % (key, info[key]))
 }
 
 # it would be better to copy them in do_install_append, but find_license_files is python
@@ -155,6 +312,14 @@
         copy_license_files(lic_files_paths, destdir)
         add_package_and_files(d)
 }
+perform_packagecopy[vardeps] += "LICENSE_CREATE_PACKAGE"
+
+def get_recipe_info(d):
+    info = {}
+    info["PV"] = d.getVar("PV", True)
+    info["PR"] = d.getVar("PR", True)
+    info["LICENSE"] = d.getVar("LICENSE", True)
+    return info
 
 def add_package_and_files(d):
     packages = d.getVar('PACKAGES', True)
@@ -185,6 +350,18 @@
                 os.remove(dst)
             if os.access(src, os.W_OK) and (os.stat(src).st_dev == os.stat(destdir).st_dev):
                 os.link(src, dst)
+                try:
+                    os.chown(dst,0,0)
+                except OSError as err:
+                    import errno
+                    if err.errno in (errno.EPERM, errno.EINVAL):
+                        # Suppress "Operation not permitted" error, as
+                        # sometimes this function is not executed under pseudo.
+                        # Also ignore "Invalid argument" errors that happen in
+                        # some (unprivileged) container environments (no root).
+                        pass
+                    else:
+                        raise
             else:
                 shutil.copyfile(src, dst)
         except Exception as e:
@@ -252,10 +429,10 @@
 
         for lic_dir in license_source_dirs:
             if not os.path.isfile(os.path.join(lic_dir, license_type)):
-                if d.getVarFlag('SPDXLICENSEMAP', license_type) != None:
+                if d.getVarFlag('SPDXLICENSEMAP', license_type, True) != None:
                     # Great, there is an SPDXLICENSEMAP. We can copy!
                     bb.debug(1, "We need to use a SPDXLICENSEMAP for %s" % (license_type))
-                    spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type)
+                    spdx_generic = d.getVarFlag('SPDXLICENSEMAP', license_type, True)
                     license_source = lic_dir
                     break
             elif os.path.isfile(os.path.join(lic_dir, license_type)):
@@ -271,14 +448,14 @@
 
             # The user may attempt to use NO_GENERIC_LICENSE for a generic license which doesn't make sense
             # and should not be allowed, warn the user in this case.
-            if d.getVarFlag('NO_GENERIC_LICENSE', license_type):
+            if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
                 bb.warn("%s: %s is a generic license, please don't use NO_GENERIC_LICENSE for it." % (pn, license_type))
 
-        elif d.getVarFlag('NO_GENERIC_LICENSE', license_type):
+        elif d.getVarFlag('NO_GENERIC_LICENSE', license_type, True):
             # if NO_GENERIC_LICENSE is set, we copy the license files from the fetched source
             # of the package rather than the license_source_dirs.
             for (basename, path) in lic_files_paths:
-                if d.getVarFlag('NO_GENERIC_LICENSE', license_type) == basename:
+                if d.getVarFlag('NO_GENERIC_LICENSE', license_type, True) == basename:
                     lic_files_paths.append(("generic_" + license_type, path))
                     break
         else:
@@ -346,7 +523,7 @@
     spdxmapkeys = d.getVarFlags('SPDXLICENSEMAP').keys()
     for wld_lic in wildcard_licenses:
         spdxflags = fnmatch.filter(spdxmapkeys, wld_lic)
-        licenses += [d.getVarFlag('SPDXLICENSEMAP', flag) for flag in spdxflags]
+        licenses += [d.getVarFlag('SPDXLICENSEMAP', flag, True) for flag in spdxflags]
 
     spdx_lics = (d.getVar('SRC_DISTRIBUTE_LICENSES', False) or '').split()
     for wld_lic in wildcard_licenses:
@@ -476,6 +653,9 @@
 ROOTFS_POSTPROCESS_COMMAND_prepend = "write_package_manifest; license_create_manifest; "
 do_rootfs[recrdeptask] += "do_populate_lic"
 
+IMAGE_POSTPROCESS_COMMAND_prepend = "write_deploy_manifest; "
+do_image[recrdeptask] += "do_populate_lic"
+
 do_populate_lic_setscene[dirs] = "${LICSSTATEDIR}/${PN}"
 do_populate_lic_setscene[cleandirs] = "${LICSSTATEDIR}"
 python do_populate_lic_setscene () {
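# --- Editor's note (illustrative sketch, not part of the patch) ---
# The license.bbclass hunks above have do_populate_lic write a small
# "recipeinfo" file (one "KEY: value" line per field) which
# license_deployed_manifest() later reads back with line.split(": ", 1).
# A standalone round-trip of that format; the directory and field values
# here are made up for illustration:

import os, tempfile

info = {"PV": "1.0", "PR": "r0", "LICENSE": "MIT"}

destdir = tempfile.mkdtemp()
with open(os.path.join(destdir, "recipeinfo"), "w") as f:
    for key in sorted(info.keys()):
        f.write("%s: %s\n" % (key, info[key]))

parsed = {}
with open(os.path.join(destdir, "recipeinfo")) as f:
    for line in f.readlines():
        key, val = line.split(": ", 1)
        parsed[key] = val[:-1]              # strip the trailing newline, as above

print(parsed)                               # {'LICENSE': 'MIT', 'PR': 'r0', 'PV': '1.0'}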
diff --git a/yocto-poky/meta/classes/linuxloader.bbclass b/yocto-poky/meta/classes/linuxloader.bbclass
new file mode 100644
index 0000000..5c4dc5c
--- /dev/null
+++ b/yocto-poky/meta/classes/linuxloader.bbclass
@@ -0,0 +1,24 @@
+
+linuxloader () {
+	case ${TARGET_ARCH} in
+		powerpc | mips | mipsel | microblaze )
+			dynamic_loader="${base_libdir}/ld.so.1"
+			;;
+		powerpc64)
+			dynamic_loader="${base_libdir}/ld64.so.1"
+			;;
+		x86_64)
+			dynamic_loader="${base_libdir}/ld-linux-x86-64.so.2"
+			;;
+		i*86 )
+			dynamic_loader="${base_libdir}/ld-linux.so.2"
+			;;
+		arm )
+			dynamic_loader="${base_libdir}/ld-linux.so.3"
+			;;
+		* )
+			dynamic_loader="/unknown_dynamic_linker"
+			;;
+	esac
+	echo $dynamic_loader
+}
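# --- Editor's note (illustrative sketch, not part of the patch) ---
# The new linuxloader.bbclass above maps TARGET_ARCH to the path of the glibc
# dynamic loader. The same lookup expressed as a quick Python reference;
# base_libdir is assumed to be /lib here, and the fallback mirrors the
# "/unknown_dynamic_linker" case:

import fnmatch

LOADERS = [
    (("powerpc", "mips", "mipsel", "microblaze"), "%(base_libdir)s/ld.so.1"),
    (("powerpc64",),                              "%(base_libdir)s/ld64.so.1"),
    (("x86_64",),                                 "%(base_libdir)s/ld-linux-x86-64.so.2"),
    (("i*86",),                                   "%(base_libdir)s/ld-linux.so.2"),
    (("arm",),                                    "%(base_libdir)s/ld-linux.so.3"),
]

def linuxloader(target_arch, base_libdir="/lib"):
    for patterns, loader in LOADERS:
        if any(fnmatch.fnmatch(target_arch, p) for p in patterns):
            return loader % {"base_libdir": base_libdir}
    return "/unknown_dynamic_linker"

for arch in ("x86_64", "i686", "arm", "sh4"):
    print(arch, "->", linuxloader(arch))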
diff --git a/yocto-poky/meta/classes/live-vm-common.bbclass b/yocto-poky/meta/classes/live-vm-common.bbclass
new file mode 100644
index 0000000..c751385
--- /dev/null
+++ b/yocto-poky/meta/classes/live-vm-common.bbclass
@@ -0,0 +1,58 @@
+# Some of the vars for the vm and live images conflict; this function
+# is used to fix the problem.
+def set_live_vm_vars(d, suffix):
+    vars = ['GRUB_CFG', 'SYSLINUX_CFG', 'ROOT', 'LABELS', 'INITRD']
+    for var in vars:
+        var_with_suffix = var + '_' + suffix
+        if d.getVar(var, True):
+            bb.warn('Found potential conflicted var %s, please use %s rather than %s' % \
+                (var, var_with_suffix, var))
+        elif d.getVar(var_with_suffix, True):
+            d.setVar(var, d.getVar(var_with_suffix, True))
+
+
+EFI = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
+EFI_PROVIDER ?= "grub-efi"
+EFI_CLASS = "${@bb.utils.contains("MACHINE_FEATURES", "efi", "${EFI_PROVIDER}", "", d)}"
+
+# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
+# contain "efi". This way legacy is supported by default if neither is
+# specified, maintaining the original behavior.
+def pcbios(d):
+    pcbios = bb.utils.contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
+    if pcbios == "0":
+        pcbios = bb.utils.contains("MACHINE_FEATURES", "efi", "0", "1", d)
+    return pcbios
+
+PCBIOS = "${@pcbios(d)}"
+PCBIOS_CLASS = "${@['','syslinux'][d.getVar('PCBIOS', True) == '1']}"
+
+inherit ${EFI_CLASS}
+inherit ${PCBIOS_CLASS}
+
+KERNEL_IMAGETYPE ??= "bzImage"
+
+populate_kernel() {
+	dest=$1
+	install -d $dest
+
+	# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
+	if [ -e ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} ]; then
+		install -m 0644 ${DEPLOY_DIR_IMAGE}/${KERNEL_IMAGETYPE} $dest/vmlinuz
+	fi
+
+	# initrd is made of concatenation of multiple filesystem images
+	if [ -n "${INITRD}" ]; then
+		rm -f $dest/initrd
+		for fs in ${INITRD}
+		do
+			if [ -s "$fs" ]; then
+				cat $fs >> $dest/initrd
+			else
+				bbfatal "$fs is invalid. initrd image creation failed."
+			fi
+		done
+		chmod 0644 $dest/initrd
+	fi
+}
+
diff --git a/yocto-poky/meta/classes/mirrors.bbclass b/yocto-poky/meta/classes/mirrors.bbclass
index b96c071..9e6d483 100644
--- a/yocto-poky/meta/classes/mirrors.bbclass
+++ b/yocto-poky/meta/classes/mirrors.bbclass
@@ -40,7 +40,8 @@
 ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/  ftp://gd.tuwien.ac.at/utils/admin-tools/lsof/ \n \
 ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/  ftp://sunsite.ualberta.ca/pub/Mirror/lsof/ \n \
 ftp://lsof.itap.purdue.edu/pub/tools/unix/lsof/  ftp://the.wiretapped.net/pub/security/host-security/lsof/ \n \
-http://www.apache.org/dist  http://archive.apache.org/dist \n \
+${APACHE_MIRROR}  http://www.us.apache.org/dist \n \
+${APACHE_MIRROR}  http://archive.apache.org/dist \n \
 http://downloads.sourceforge.net/watchdog/ http://fossies.org/linux/misc/ \n \
 ${SAVANNAH_GNU_MIRROR} http://download-mirror.savannah.gnu.org/releases \n \
 ${SAVANNAH_NONGNU_MIRROR} http://download-mirror.savannah.nongnu.org/releases \n \
@@ -53,6 +54,7 @@
 osc://.*/.*     http://downloads.yoctoproject.org/mirror/sources/ \n \
 https?$://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n \
 ftp://.*/.*     http://downloads.yoctoproject.org/mirror/sources/ \n \
+npm://.*/.*     http://downloads.yoctoproject.org/mirror/sources/ \n \
 cvs://.*/.*     http://sources.openembedded.org/ \n \
 svn://.*/.*     http://sources.openembedded.org/ \n \
 git://.*/.*     http://sources.openembedded.org/ \n \
@@ -62,6 +64,7 @@
 osc://.*/.*     http://sources.openembedded.org/ \n \
 https?$://.*/.* http://sources.openembedded.org/ \n \
 ftp://.*/.*     http://sources.openembedded.org/ \n \
+npm://.*/.*     http://sources.openembedded.org/ \n \
 ${CPAN_MIRROR}  http://cpan.metacpan.org/ \n \
 ${CPAN_MIRROR}  http://search.cpan.org/CPAN/ \n \
 "
diff --git a/yocto-poky/meta/classes/module-base.bbclass b/yocto-poky/meta/classes/module-base.bbclass
index 8be26c4..6fe77c0 100644
--- a/yocto-poky/meta/classes/module-base.bbclass
+++ b/yocto-poky/meta/classes/module-base.bbclass
@@ -1,7 +1,7 @@
 inherit kernel-arch
 
 # This is instead of DEPENDS = "virtual/kernel"
-do_configure[depends] += "virtual/kernel:do_shared_workdir"
+do_configure[depends] += "virtual/kernel:do_compile_kernelmodules"
 
 export OS = "${TARGET_OS}"
 export CROSS_COMPILE = "${TARGET_PREFIX}"
@@ -19,7 +19,7 @@
 PACKAGE_ARCH = "${MACHINE_ARCH}"
 
 # Function to ensure the kernel scripts are created. Expected to
-# be called before do_compile. See module.bbclass for an exmaple.
+# be called before do_compile. See module.bbclass for an example.
 do_make_scripts() {
 	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS 
 	make CC="${KERNEL_CC}" LD="${KERNEL_LD}" AR="${KERNEL_AR}" \
diff --git a/yocto-poky/meta/classes/module.bbclass b/yocto-poky/meta/classes/module.bbclass
index 0952c0c..01c9309 100644
--- a/yocto-poky/meta/classes/module.bbclass
+++ b/yocto-poky/meta/classes/module.bbclass
@@ -6,6 +6,8 @@
 
 EXTRA_OEMAKE += "KERNEL_SRC=${STAGING_KERNEL_DIR}"
 
+MODULES_INSTALL_TARGET ?= "modules_install"
+
 module_do_compile() {
 	unset CFLAGS CPPFLAGS CXXFLAGS LDFLAGS
 	oe_runmake KERNEL_PATH=${STAGING_KERNEL_DIR}   \
@@ -21,7 +23,7 @@
 	oe_runmake DEPMOD=echo INSTALL_MOD_PATH="${D}" \
 	           CC="${KERNEL_CC}" LD="${KERNEL_LD}" \
 	           O=${STAGING_KERNEL_BUILDDIR} \
-	           modules_install
+	           ${MODULES_INSTALL_TARGET}
 }
 
 EXPORT_FUNCTIONS do_compile do_install
diff --git a/yocto-poky/meta/classes/multilib.bbclass b/yocto-poky/meta/classes/multilib.bbclass
index 052f911..d5a3128 100644
--- a/yocto-poky/meta/classes/multilib.bbclass
+++ b/yocto-poky/meta/classes/multilib.bbclass
@@ -59,7 +59,7 @@
     e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
 
     # Expand the WHITELISTs with multilib prefix
-    for whitelist in ["HOSTTOOLS_WHITELIST_GPL-3.0", "WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
+    for whitelist in ["WHITELIST_GPL-3.0", "LGPLv2_WHITELIST_GPL-3.0"]:
         pkgs = e.data.getVar(whitelist, True)
         for pkg in pkgs.split():
             pkgs += " " + variant + "-" + pkg
diff --git a/yocto-poky/meta/classes/native.bbclass b/yocto-poky/meta/classes/native.bbclass
index bcbcd61..f67ef00 100644
--- a/yocto-poky/meta/classes/native.bbclass
+++ b/yocto-poky/meta/classes/native.bbclass
@@ -42,7 +42,7 @@
 
 CPPFLAGS = "${BUILD_CPPFLAGS}"
 CFLAGS = "${BUILD_CFLAGS}"
-CXXFLAGS = "${BUILD_CFLAGS}"
+CXXFLAGS = "${BUILD_CXXFLAGS}"
 LDFLAGS = "${BUILD_LDFLAGS}"
 LDFLAGS_build-darwin = "-L${STAGING_LIBDIR_NATIVE} "
 
@@ -106,6 +106,8 @@
 EXTRA_NATIVE_PKGCONFIG_PATH ?= ""
 PKG_CONFIG_PATH .= "${EXTRA_NATIVE_PKGCONFIG_PATH}"
 PKG_CONFIG_SYSROOT_DIR = ""
+PKG_CONFIG_SYSTEM_LIBRARY_PATH[unexport] = "1"
+PKG_CONFIG_SYSTEM_INCLUDE_PATH[unexport] = "1"
 
 # we dont want libc-uclibc or libc-glibc to kick in for native recipes
 LIBCOVERRIDE = ""
@@ -169,13 +171,7 @@
 addhandler native_virtclass_handler
 native_virtclass_handler[eventmask] = "bb.event.RecipePreFinalise"
 
-deltask package
-deltask packagedata
-deltask package_qa
-deltask package_write_ipk
-deltask package_write_deb
-deltask package_write_rpm
-deltask package_write
+inherit nopackages
 
 do_packagedata[stamp-extra-info] = ""
 do_populate_sysroot[stamp-extra-info] = ""
diff --git a/yocto-poky/meta/classes/nopackages.bbclass b/yocto-poky/meta/classes/nopackages.bbclass
new file mode 100644
index 0000000..0c2761b
--- /dev/null
+++ b/yocto-poky/meta/classes/nopackages.bbclass
@@ -0,0 +1,6 @@
+deltask do_package
+deltask do_package_write_rpm
+deltask do_package_write_ipk
+deltask do_package_write_deb
+deltask do_package_qa
+deltask do_packagedata
diff --git a/yocto-poky/meta/classes/npm.bbclass b/yocto-poky/meta/classes/npm.bbclass
new file mode 100644
index 0000000..9843e87
--- /dev/null
+++ b/yocto-poky/meta/classes/npm.bbclass
@@ -0,0 +1,49 @@
+DEPENDS_prepend = "nodejs-native "
+S = "${WORKDIR}/npmpkg"
+
+NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
+
+npm_do_compile() {
+	# changing the home directory to the working directory, the .npmrc will
+	# be created in this directory
+	export HOME=${WORKDIR}
+	npm config set dev false
+	npm set cache ${WORKDIR}/npm_cache
+	# clear cache before every build
+	npm cache clear
+	# Install pkg into ${S} without going to the registry
+	npm --arch=${TARGET_ARCH} --production --no-registry install
+}
+
+npm_do_install() {
+	mkdir -p ${NPM_INSTALLDIR}/
+	cp -a ${S}/* ${NPM_INSTALLDIR}/ --no-preserve=ownership
+}
+
+python populate_packages_prepend () {
+    instdir = d.expand('${D}${libdir}/node_modules/${PN}')
+    extrapackages = oe.package.npm_split_package_dirs(instdir)
+    pkgnames = extrapackages.keys()
+    d.prependVar('PACKAGES', '%s ' % ' '.join(pkgnames))
+    for pkgname in pkgnames:
+        pkgrelpath, pdata = extrapackages[pkgname]
+        pkgpath = '${libdir}/node_modules/${PN}/' + pkgrelpath
+        # package names can't have underscores but npm packages sometimes use them
+        oe_pkg_name = pkgname.replace('_', '-')
+        expanded_pkgname = d.expand(oe_pkg_name)
+        d.setVar('FILES_%s' % expanded_pkgname, pkgpath)
+        if pdata:
+            version = pdata.get('version', None)
+            if version:
+                d.setVar('PKGV_%s' % expanded_pkgname, version.encode("utf8"))
+            description = pdata.get('description', None)
+            if description:
+                d.setVar('SUMMARY_%s' % expanded_pkgname, description.replace(u"\u2018", "'").replace(u"\u2019", "'").encode("utf8"))
+    d.appendVar('RDEPENDS_%s' % d.getVar('PN', True), ' %s' % ' '.join(pkgnames).replace('_', '-'))
+}
+
+FILES_${PN} += " \
+    ${libdir}/node_modules/${PN} \
+"
+
+EXPORT_FUNCTIONS do_compile do_install
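# --- Editor's note (illustrative sketch, not part of the patch) ---
# populate_packages_prepend above relies on oe.package.npm_split_package_dirs()
# to turn the installed node_modules tree into one sub-package per module,
# pulling version/description out of each package.json. The snippet below is
# only a rough standalone approximation of that idea (the walked path is made
# up for illustration), not the real helper:

import json, os

def split_package_dirs(instdir):
    packages = {}
    for root, dirs, files in os.walk(instdir):
        # only consider directories that live directly under a node_modules directory
        if os.path.basename(os.path.dirname(root)) != "node_modules":
            continue
        relpath = os.path.relpath(root, instdir)
        pkgname = os.path.basename(root).replace("_", "-")   # npm allows "_", package names do not
        pdata = None
        if "package.json" in files:
            with open(os.path.join(root, "package.json")) as f:
                pdata = json.load(f)
        packages[pkgname] = (relpath, pdata)
    return packages

for name, (relpath, pdata) in sorted(split_package_dirs("image/usr/lib/node_modules/mypkg").items()):
    print(name, relpath, (pdata or {}).get("version"))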
diff --git a/yocto-poky/meta/classes/own-mirrors.bbclass b/yocto-poky/meta/classes/own-mirrors.bbclass
index e235227..12b4267 100644
--- a/yocto-poky/meta/classes/own-mirrors.bbclass
+++ b/yocto-poky/meta/classes/own-mirrors.bbclass
@@ -9,4 +9,5 @@
 osc://.*/.*     ${SOURCE_MIRROR_URL}
 https?$://.*/.* ${SOURCE_MIRROR_URL}
 ftp://.*/.*     ${SOURCE_MIRROR_URL}
+npm://.*/.*     ${SOURCE_MIRROR_URL}
 }
diff --git a/yocto-poky/meta/classes/package.bbclass b/yocto-poky/meta/classes/package.bbclass
index a86b680..76b9f86 100644
--- a/yocto-poky/meta/classes/package.bbclass
+++ b/yocto-poky/meta/classes/package.bbclass
@@ -121,6 +121,9 @@
     """
 
     dvar = d.getVar('PKGD', True)
+    root = d.expand(root)
+    output_pattern = d.expand(output_pattern)
+    extra_depends = d.expand(extra_depends)
 
     # If the root directory doesn't exist, don't error out later but silently do
     # no splitting.
@@ -298,6 +301,15 @@
     os.chdir(cwd)
     return conf_list
 
+def checkbuildpath(file, d):
+    tmpdir = d.getVar('TMPDIR', True)
+    with open(file) as f:
+        file_content = f.read()
+        if tmpdir in file_content:
+            return True
+
+    return False
+
 def splitdebuginfo(file, debugfile, debugsrcdir, sourcefile, d):
     # Function to split a single file into two components, one is the stripped
     # target system binary, the other contains any debugging information. The
@@ -310,8 +322,6 @@
     dvar = d.getVar('PKGD', True)
     objcopy = d.getVar("OBJCOPY", True)
     debugedit = d.expand("${STAGING_LIBDIR_NATIVE}/rpm/bin/debugedit")
-    workdir = d.getVar("WORKDIR", True)
-    workparentdir = d.getVar("DEBUGSRC_OVERRIDE_PATH", True) or os.path.dirname(os.path.dirname(workdir))
 
     # We ignore kernel modules, we don't generate debug info files.
     if file.find("/lib/modules/") != -1 and file.endswith(".ko"):
@@ -325,7 +335,7 @@
 
     # We need to extract the debug src information here...
     if debugsrcdir:
-        cmd = "'%s' -b '%s' -d '%s' -i -l '%s' '%s'" % (debugedit, workparentdir, debugsrcdir, sourcefile, file)
+        cmd = "'%s' -i -l '%s' '%s'" % (debugedit, sourcefile, file)
         (retval, output) = oe.utils.getstatusoutput(cmd)
         if retval:
             bb.fatal("debugedit failed with exit code %s (cmd was %s)%s" % (retval, cmd, ":\n%s" % output if output else ""))
@@ -364,6 +374,13 @@
         workparentdir = os.path.dirname(os.path.dirname(workdir))
         workbasedir = os.path.basename(os.path.dirname(workdir)) + "/" + os.path.basename(workdir)
 
+        # If build path exists in sourcefile, it means toolchain did not use
+        # -fdebug-prefix-map to compile
+        if checkbuildpath(sourcefile, d):
+            localsrc_prefix = workparentdir + "/"
+        else:
+            localsrc_prefix = "/usr/src/debug/"
+
         nosuchdir = []
         basepath = dvar
         for p in debugsrcdir.split("/"):
@@ -377,9 +394,11 @@
         # We need to ignore files that are not actually ours
         # we do this by only paying attention to items from this package
         processdebugsrc += "fgrep -zw '%s' | "
+        # Remove prefix in the source paths
+        processdebugsrc += "sed 's#%s##g' | "
         processdebugsrc += "(cd '%s' ; cpio -pd0mlL --no-preserve-owner '%s%s' 2>/dev/null)"
 
-        cmd = processdebugsrc % (sourcefile, workbasedir, workparentdir, dvar, debugsrcdir)
+        cmd = processdebugsrc % (sourcefile, workbasedir, localsrc_prefix, workparentdir, dvar, debugsrcdir)
         (retval, output) = oe.utils.getstatusoutput(cmd)
         # Can "fail" if internal headers/transient sources are attempted
         #if retval:
@@ -427,7 +446,7 @@
         if d.getVar(key, False) is None:
             continue
         d.setVarFlag(key, "type", "list")
-        if d.getVarFlag(key, "separator") is None:
+        if d.getVarFlag(key, "separator", True) is None:
             d.setVarFlag(key, "separator", "\\n")
         metadata_fields = [field.strip() for field in oe.data.typed_value(key, d)]
         return "\n".join(metadata_fields).strip()
@@ -708,6 +727,7 @@
     dvar = d.getVar('PKGD', True)
 
     fs_perms_table = {}
+    fs_link_table = {}
 
     # By default all of the standard directories specified in
     # bitbake.conf will get 0755 root:root.
@@ -754,24 +774,32 @@
                     continue
                 entry = fs_perms_entry(d.expand(line))
                 if entry and entry.path:
-                    fs_perms_table[entry.path] = entry
+                    if entry.link:
+                        fs_link_table[entry.path] = entry
+                        if entry.path in fs_perms_table:
+                            fs_perms_table.pop(entry.path)
+                    else:
+                        fs_perms_table[entry.path] = entry
+                        if entry.path in fs_link_table:
+                            fs_link_table.pop(entry.path)
             f.close()
 
     # Debug -- list out in-memory table
     #for dir in fs_perms_table:
     #    bb.note("Fixup Perms: %s: %s" % (dir, str(fs_perms_table[dir])))
+    #for link in fs_link_table:
+    #    bb.note("Fixup Perms: %s: %s" % (link, str(fs_link_table[link])))
 
     # We process links first, so we can go back and fixup directory ownership
     # for any newly created directories
-    for dir in fs_perms_table:
-        if not fs_perms_table[dir].link:
-            continue
-
+    # Process in sorted order so /run gets created before /run/lock, etc.
+    for entry in sorted(fs_link_table.values(), key=lambda x: x.link):
+        link = entry.link
+        dir = entry.path
         origin = dvar + dir
         if not (cpath.exists(origin) and cpath.isdir(origin) and not cpath.islink(origin)):
             continue
 
-        link = fs_perms_table[dir].link
         if link[0] == "/":
             target = dvar + link
             ptarget = link
@@ -791,9 +819,6 @@
         os.symlink(link, origin)
 
     for dir in fs_perms_table:
-        if fs_perms_table[dir].link:
-            continue
-
         origin = dvar + dir
         if not (cpath.exists(origin) and cpath.isdir(origin)):
             continue
@@ -905,7 +930,7 @@
                     continue
                 # Check it's an executable
                 if (s[stat.ST_MODE] & stat.S_IXUSR) or (s[stat.ST_MODE] & stat.S_IXGRP) or (s[stat.ST_MODE] & stat.S_IXOTH) \
-                        or ((file.startswith(libdir) or file.startswith(baselibdir)) and ".so" in f):
+                        or ((file.startswith(libdir) or file.startswith(baselibdir)) and (".so" in f or ".node" in f)):
                     # If it's a symlink, and points to an ELF file, we capture the readlink target
                     if cpath.islink(file):
                         target = os.readlink(file)
@@ -1039,6 +1064,8 @@
 
     bb.utils.mkdirhier(outdir)
     os.chdir(dvar)
+    
+    autodebug = not (d.getVar("NOAUTOPACKAGEDEBUG", True) or False)
 
     # Sanity check PACKAGES for duplicates
     # Sanity should be moved to sanity.bbclass once we have the infrastructure
@@ -1048,6 +1075,8 @@
         if pkg in package_list:
             msg = "%s is listed in PACKAGES multiple times, this leads to packaging errors." % pkg
             package_qa_handle_error("packages-list", msg, d)
+        elif autodebug and pkg.endswith("-dbg"):
+            package_list.insert(0, pkg)
         else:
             package_list.append(pkg)
     d.setVar('PACKAGES', ' '.join(package_list))
@@ -1058,6 +1087,16 @@
     # os.mkdir masks the permissions with umask so we have to unset it first
     oldumask = os.umask(0)
 
+    debug = []
+    for root, dirs, files in cpath.walk(dvar):
+        dir = root[len(dvar):]
+        if not dir:
+            dir = os.sep
+        for f in (files + dirs):
+            path = "." + os.path.join(dir, f)
+            if "/.debug/" in path or path.endswith("/.debug"):
+                debug.append(path)
+
     for pkg in package_list:
         root = os.path.join(pkgdest, pkg)
         bb.utils.mkdirhier(root)
@@ -1071,6 +1110,9 @@
         origfiles = filesvar.split()
         files = files_from_filevars(origfiles)
 
+        if autodebug and pkg.endswith("-dbg"):
+            files.extend(debug)
+
         for file in files:
             if (not cpath.islink(file)) and (not cpath.exists(file)):
                 continue
@@ -1513,7 +1555,7 @@
             rpath = []
             p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-l', file],stdout=sub.PIPE,stderr=sub.PIPE)
             err, out = p.communicate()
-            # If returned succesfully, process stderr for results
+            # If returned successfully, process stderr for results
             if p.returncode == 0:
                 for l in err.split("\n"):
                     l = l.strip()
@@ -1522,7 +1564,7 @@
 
         p = sub.Popen([d.expand("${HOST_PREFIX}otool"), '-L', file],stdout=sub.PIPE,stderr=sub.PIPE)
         err, out = p.communicate()
-        # If returned succesfully, process stderr for results
+        # If returned successfully, process stderr for results
         if p.returncode == 0:
             for l in err.split("\n"):
                 l = l.strip()
@@ -1892,12 +1934,11 @@
         for pkg in pkglibdeps:
             for k in pkglibdeps[pkg]:
                 add_dep(pkglibdeplist, k)
-        # FIXME this should not look at PN once all task recipes inherit from task.bbclass
-        dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (d.getVar('PN', True) or '').startswith('packagegroup-'))
+        dbgdefaultdeps = ((d.getVar('DEPCHAIN_DBGDEFAULTDEPS', True) == '1') or (bb.data.inherits_class('packagegroup', d)))
 
     for suffix in pkgs:
         for pkg in pkgs[suffix]:
-            if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs'):
+            if d.getVarFlag('RRECOMMENDS_' + pkg, 'nodeprrecs', True):
                 continue
             (base, func) = pkgs[suffix][pkg]
             if suffix == "-dev":
@@ -2035,6 +2076,10 @@
 
     for f in (d.getVar('PACKAGEFUNCS', True) or '').split():
         bb.build.exec_func(f, d)
+
+    qa_sane = d.getVar("QA_SANE", True)
+    if not qa_sane:
+        bb.fatal("Fatal QA errors found, failing task.")
 }
 
 do_package[dirs] = "${SHLIBSWORKDIR} ${PKGDESTWORK} ${D}"
diff --git a/yocto-poky/meta/classes/package_deb.bbclass b/yocto-poky/meta/classes/package_deb.bbclass
index d2fea4f..e1d05a7 100644
--- a/yocto-poky/meta/classes/package_deb.bbclass
+++ b/yocto-poky/meta/classes/package_deb.bbclass
@@ -6,7 +6,8 @@
 
 IMAGE_PKGTYPE ?= "deb"
 
-DPKG_ARCH ?= "${TARGET_ARCH}" 
+DPKG_ARCH ?= "${@debian_arch_map(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True))}"
+DPKG_ARCH[vardepvalue] = "${DPKG_ARCH}"
 
 PKGWRITEDIRDEB = "${WORKDIR}/deploy-debs"
 
@@ -14,6 +15,28 @@
 
 APT_ARGS = "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
 
+def debian_arch_map(arch, tune):
+    tune_features = tune.split()
+    if arch in ["i586", "i686"]:
+        return "i386"
+    if arch == "x86_64":
+        if "mx32" in tune_features:
+            return "x32"
+        return "amd64"
+    if arch.startswith("mips"):
+        endian = ["el", ""]["bigendian" in tune_features]
+        if "n64" in tune_features:
+            return "mips64" + endian
+        if "n32" in tune_features:
+            return "mipsn32" + endian
+        return "mips" + endian
+    if arch == "powerpc":
+        return arch + ["", "spe"]["spe" in tune_features]
+    if arch == "aarch64":
+        return "arm64"
+    if arch == "arm":
+        return arch + ["el", "hf"]["callconvention-hard" in tune_features]
+    return arch
 #
 # install a bunch of packages using apt
 # the following shell variables need to be set before calling this func:
@@ -21,7 +44,7 @@
 # INSTALL_BASEARCH_DEB - install base architecture
 # INSTALL_ARCHS_DEB - list of available archs
 # INSTALL_PACKAGES_NORMAL_DEB - packages to be installed
-# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attemped to be installed only
+# INSTALL_PACKAGES_ATTEMPTONLY_DEB - packages attempted to be installed only
 # INSTALL_PACKAGES_LINGUAS_DEB - additional packages for uclibc
 # INSTALL_TASK_DEB - task name
 
@@ -139,6 +162,8 @@
             return l2
 
         ctrlfile.write("Package: %s\n" % pkgname)
+        if d.getVar('PACKAGE_ARCH', True) == "all":
+            ctrlfile.write("Multi-Arch: foreign\n")
         # check for required fields
         try:
             for (c, fs) in fields:
@@ -209,12 +234,15 @@
 
         rdepends = bb.utils.explode_dep_versions2(localdata.getVar("RDEPENDS", True) or "")
         debian_cmp_remap(rdepends)
-        for dep in rdepends:
+        for dep in rdepends.keys():
+                if dep == pkg:
+                        del rdepends[dep]
+                        continue
                 if '*' in dep:
                         del rdepends[dep]
         rrecommends = bb.utils.explode_dep_versions2(localdata.getVar("RRECOMMENDS", True) or "")
         debian_cmp_remap(rrecommends)
-        for dep in rrecommends:
+        for dep in rrecommends.keys():
                 if '*' in dep:
                         del rrecommends[dep]
         rsuggests = bb.utils.explode_dep_versions2(localdata.getVar("RSUGGESTS", True) or "")
@@ -288,6 +316,8 @@
         cleanupcontrol(root)
         bb.utils.unlockfile(lf)
 }
+# Indirect references to these vars
+do_package_write_deb[vardeps] += "PKGV PKGR PKGV DESCRIPTION SECTION PRIORITY MAINTAINER DPKG_ARCH PN HOMEPAGE"
 # Otherwise allarch packages may change depending on override configuration
 do_package_deb[vardepsexclude] = "OVERRIDES"
 
@@ -311,15 +341,6 @@
         deps = ' dpkg-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
         d.appendVarFlag('do_package_write_deb', 'depends', deps)
         d.setVarFlag('do_package_write_deb', 'fakeroot', "1")
-
-    # Map TARGET_ARCH to Debian's ideas about architectures
-    darch = d.getVar('DPKG_ARCH', True)
-    if darch in ["x86", "i486", "i586", "i686", "pentium"]:
-         d.setVar('DPKG_ARCH', 'i386')
-    elif darch == "x86_64":
-         d.setVar('DPKG_ARCH', 'amd64')
-    elif darch == "arm":
-         d.setVar('DPKG_ARCH', 'armel')
 }
 
 python do_package_write_deb () {
diff --git a/yocto-poky/meta/classes/package_ipk.bbclass b/yocto-poky/meta/classes/package_ipk.bbclass
index 4dd7a7e..f1ad1d5 100644
--- a/yocto-poky/meta/classes/package_ipk.bbclass
+++ b/yocto-poky/meta/classes/package_ipk.bbclass
@@ -10,7 +10,7 @@
 # Program to be used to build opkg packages
 OPKGBUILDCMD ??= "opkg-build"
 
-OPKG_ARGS = "--force_postinstall --prefer-arch-to-version"
+OPKG_ARGS += "--force_postinstall --prefer-arch-to-version"
 OPKG_ARGS += "${@['', '--no-install-recommends'][d.getVar("NO_RECOMMENDATIONS", True) == "1"]}"
 OPKG_ARGS += "${@['', '--add-exclude ' + ' --add-exclude '.join((d.getVar('PACKAGE_EXCLUDE', True) or "").split())][(d.getVar("PACKAGE_EXCLUDE", True) or "") != ""]}"
 
@@ -246,6 +246,11 @@
             bb.utils.unlockfile(lf)
             raise bb.build.FuncFailed("opkg-build execution failed")
 
+        if d.getVar('IPK_SIGN_PACKAGES', True) == '1':
+            ipkver = "%s-%s" % (d.getVar('PKGV', True), d.getVar('PKGR', True))
+            ipk_to_sign = "%s/%s_%s_%s.ipk" % (pkgoutdir, pkgname, ipkver, d.getVar('PACKAGE_ARCH', True))
+            sign_ipk(d, ipk_to_sign)
+
         cleanupcontrol(root)
         bb.utils.unlockfile(lf)
 
diff --git a/yocto-poky/meta/classes/package_rpm.bbclass b/yocto-poky/meta/classes/package_rpm.bbclass
index 1fa1634..7d523a1 100644
--- a/yocto-poky/meta/classes/package_rpm.bbclass
+++ b/yocto-poky/meta/classes/package_rpm.bbclass
@@ -743,7 +743,7 @@
     if d.getVar('PACKAGES', True) != '':
         deps = ' rpm-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot'
         d.appendVarFlag('do_package_write_rpm', 'depends', deps)
-        d.setVarFlag('do_package_write_rpm', 'fakeroot', 1)
+        d.setVarFlag('do_package_write_rpm', 'fakeroot', '1')
 }
 
 SSTATETASKS += "do_package_write_rpm"
diff --git a/yocto-poky/meta/classes/package_tar.bbclass b/yocto-poky/meta/classes/package_tar.bbclass
index f9e2292..854e645 100644
--- a/yocto-poky/meta/classes/package_tar.bbclass
+++ b/yocto-poky/meta/classes/package_tar.bbclass
@@ -53,7 +53,7 @@
 
 python () {
     if d.getVar('PACKAGES', True) != '':
-        deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split()
+        deps = (d.getVarFlag('do_package_write_tar', 'depends', True) or "").split()
         deps.append('tar-native:do_populate_sysroot')
         deps.append('virtual/fakeroot-native:do_populate_sysroot')
         d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps))
diff --git a/yocto-poky/meta/classes/packagegroup.bbclass b/yocto-poky/meta/classes/packagegroup.bbclass
index 56cfead..38bdbd3 100644
--- a/yocto-poky/meta/classes/packagegroup.bbclass
+++ b/yocto-poky/meta/classes/packagegroup.bbclass
@@ -22,13 +22,15 @@
 # Also mark all packages as ALLOW_EMPTY
 python () {
     packages = d.getVar('PACKAGES', True).split()
-    genpackages = []
-    for pkg in packages:
-        d.setVar("ALLOW_EMPTY_%s" % pkg, "1")
-        for postfix in ['-dbg', '-dev', '-ptest']:
-            genpackages.append(pkg+postfix)
     if d.getVar('PACKAGEGROUP_DISABLE_COMPLEMENTARY', True) != '1':
-        d.setVar('PACKAGES', ' '.join(packages+genpackages))
+        types = ['', '-dbg', '-dev']
+        if bb.utils.contains('DISTRO_FEATURES', 'ptest', True, False, d):
+            types.append('-ptest')
+        packages = [pkg + suffix for pkg in packages
+                    for suffix in types]
+        d.setVar('PACKAGES', ' '.join(packages))
+    for pkg in packages:
+        d.setVar('ALLOW_EMPTY_%s' % pkg, '1')
 }
 
 # We don't want to look at shared library dependencies for the
diff --git a/yocto-poky/meta/classes/packageinfo.bbclass b/yocto-poky/meta/classes/packageinfo.bbclass
deleted file mode 100644
index 7d60ace..0000000
--- a/yocto-poky/meta/classes/packageinfo.bbclass
+++ /dev/null
@@ -1,22 +0,0 @@
-python packageinfo_handler () {
-    import oe.packagedata
-    pkginfolist = []
-
-    pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + '/runtime/'
-    if os.path.exists(pkgdata_dir):
-        for root, dirs, files in os.walk(pkgdata_dir):
-            for pkgname in files:
-                if pkgname.endswith('.packaged'):
-                    pkgname = pkgname[:-9]
-                    pkgdatafile = root + pkgname
-                    try:
-                        sdata = oe.packagedata.read_pkgdatafile(pkgdatafile)
-                        sdata['PKG'] = pkgname
-                        pkginfolist.append(sdata)
-                    except Exception as e:
-                        bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, e.__class__, str(e)))
-    bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data)
-}
-
-addhandler packageinfo_handler
-packageinfo_handler[eventmask] = "bb.event.RequestPackageInfo"
diff --git a/yocto-poky/meta/classes/patch.bbclass b/yocto-poky/meta/classes/patch.bbclass
index 1e2aab0..3d22ad8 100644
--- a/yocto-poky/meta/classes/patch.bbclass
+++ b/yocto-poky/meta/classes/patch.bbclass
@@ -136,7 +136,6 @@
 
     s = d.getVar('S', True)
 
-    path = os.getenv('PATH')
     os.putenv('PATH', d.getVar('PATH', True))
 
     # We must use one TMPDIR per process so that the "patch" processes
diff --git a/yocto-poky/meta/classes/populate_sdk_base.bbclass b/yocto-poky/meta/classes/populate_sdk_base.bbclass
index 35e129b..008bb57 100644
--- a/yocto-poky/meta/classes/populate_sdk_base.bbclass
+++ b/yocto-poky/meta/classes/populate_sdk_base.bbclass
@@ -40,7 +40,7 @@
 TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}"
 
 SDK_RDEPENDS = "${TOOLCHAIN_TARGET_TASK} ${TOOLCHAIN_HOST_TASK}"
-SDK_DEPENDS = "virtual/fakeroot-native pbzip2-native"
+SDK_DEPENDS = "virtual/fakeroot-native pixz-native"
 
 # We want the MULTIARCH_TARGET_SYS to point to the TUNE_PKGARCH, not PACKAGE_ARCH as it
 # could be set to the MACHINE_ARCH
@@ -62,25 +62,30 @@
 SDK_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest"
 python write_target_sdk_manifest () {
     from oe.sdk import sdk_list_installed_packages
+    from oe.utils import format_pkg_list
     sdkmanifestdir = os.path.dirname(d.getVar("SDK_TARGET_MANIFEST", True))
+    pkgs = sdk_list_installed_packages(d, True)
     if not os.path.exists(sdkmanifestdir):
         bb.utils.mkdirhier(sdkmanifestdir)
     with open(d.getVar('SDK_TARGET_MANIFEST', True), 'w') as output:
-        output.write(sdk_list_installed_packages(d, True, 'ver'))
+        output.write(format_pkg_list(pkgs, 'ver'))
 }
 
 python write_host_sdk_manifest () {
     from oe.sdk import sdk_list_installed_packages
+    from oe.utils import format_pkg_list
     sdkmanifestdir = os.path.dirname(d.getVar("SDK_HOST_MANIFEST", True))
+    pkgs = sdk_list_installed_packages(d, False)
     if not os.path.exists(sdkmanifestdir):
         bb.utils.mkdirhier(sdkmanifestdir)
     with open(d.getVar('SDK_HOST_MANIFEST', True), 'w') as output:
-        output.write(sdk_list_installed_packages(d, False, 'ver'))
+        output.write(format_pkg_list(pkgs, 'ver'))
 }
 
 POPULATE_SDK_POST_TARGET_COMMAND_append = " write_target_sdk_manifest ; "
 POPULATE_SDK_POST_HOST_COMMAND_append = " write_host_sdk_manifest; "
-SDK_POSTPROCESS_COMMAND = " create_sdk_files; tar_sdk; ${SDK_PACKAGING_FUNC}; "
+SDK_PACKAGING_COMMAND = "${@'${SDK_PACKAGING_FUNC};' if '${SDK_PACKAGING_FUNC}' else ''}"
+SDK_POSTPROCESS_COMMAND = " create_sdk_files; check_sdk_sysroots; tar_sdk; ${SDK_PACKAGING_COMMAND} "
 
 # Some archs override this, we need the nativesdk version
 # turns out this is hard to get from the datastore due to TRANSLATED_TARGET_ARCH
@@ -120,13 +125,64 @@
 	sed -i -e "s:##DEFAULT_INSTALL_DIR##:$escaped_sdkpath:" ${SDK_OUTPUT}/${SDKPATH}/relocate_sdk.py
 }
 
+python check_sdk_sysroots() {
+    # Fails build if there are broken or dangling symlinks in SDK sysroots
+
+    if d.getVar('CHECK_SDK_SYSROOTS', True) != '1':
+        # disabled, bail out
+        return
+
+    def norm_path(path):
+        return os.path.abspath(path)
+
+    # Get scan root
+    SCAN_ROOT = norm_path("${SDK_OUTPUT}/${SDKPATH}/sysroots/")
+
+    bb.note('Checking SDK sysroots at ' + SCAN_ROOT)
+
+    def check_symlink(linkPath):
+        if not os.path.islink(linkPath):
+            return
+
+        linkDirPath = os.path.dirname(linkPath)
+
+        targetPath = os.readlink(linkPath)
+        if not os.path.isabs(targetPath):
+            targetPath = os.path.join(linkDirPath, targetPath)
+        targetPath = norm_path(targetPath)
+
+        if SCAN_ROOT != os.path.commonprefix( [SCAN_ROOT, targetPath] ):
+            bb.error("Escaping symlink {0!s} --> {1!s}".format(linkPath, targetPath))
+            return
+
+        if not os.path.exists(targetPath):
+            bb.error("Broken symlink {0!s} --> {1!s}".format(linkPath, targetPath))
+            return
+
+        if os.path.isdir(targetPath):
+            dir_walk(targetPath)
+
+    def walk_error_handler(e):
+        bb.error(str(e))
+
+    def dir_walk(rootDir):
+        for dirPath,subDirEntries,fileEntries in os.walk(rootDir, followlinks=False, onerror=walk_error_handler):
+            entries = subDirEntries + fileEntries
+            for e in entries:
+                ePath = os.path.join(dirPath, e)
+                check_symlink(ePath)
+
+    # start
+    dir_walk(SCAN_ROOT)
+}
+
 SDKTAROPTS = "--owner=root --group=root"
 
 fakeroot tar_sdk() {
 	# Package it up
 	mkdir -p ${SDK_DEPLOY}
 	cd ${SDK_OUTPUT}/${SDKPATH}
-	tar ${SDKTAROPTS} -cf - . | pbzip2 > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2
+	tar ${SDKTAROPTS} -cf - . | pixz > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
 }
 
 fakeroot create_shar() {
@@ -165,10 +221,10 @@
 	chmod +x ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
 
 	# append the SDK tarball
-	cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2 >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
+	cat ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz >> ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh
 
 	# delete the old tarball, we don't need it anymore
-	rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.bz2
+	rm ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.tar.xz
 }
 
 populate_sdk_log_check() {
@@ -188,6 +244,22 @@
 	done
 }
 
+def sdk_command_variables(d):
+    return ['OPKG_PREPROCESS_COMMANDS','OPKG_POSTPROCESS_COMMANDS','POPULATE_SDK_POST_HOST_COMMAND','POPULATE_SDK_POST_TARGET_COMMAND','SDK_POSTPROCESS_COMMAND','RPM_PREPROCESS_COMMANDS',
+            'RPM_POSTPROCESS_COMMANDS']
+
+def sdk_variables(d):
+    variables = ['BUILD_IMAGES_FROM_FEEDS','SDK_OS','SDK_OUTPUT','SDKPATHNATIVE','SDKTARGETSYSROOT','SDK_DIR','SDK_VENDOR','SDKIMAGE_INSTALL_COMPLEMENTARY','SDK_PACKAGE_ARCHS','SDK_OUTPUT',
+                 'SDKTARGETSYSROOT','MULTILIB_VARIANTS','MULTILIBS','ALL_MULTILIB_PACKAGE_ARCHS','MULTILIB_GLOBAL_VARIANTS','BAD_RECOMMENDATIONS','NO_RECOMMENDATIONS','PACKAGE_ARCHS',
+                 'PACKAGE_CLASSES','TARGET_VENDOR','TARGET_VENDOR','TARGET_ARCH','TARGET_OS','BBEXTENDVARIANT','FEED_DEPLOYDIR_BASE_URI']
+    variables.extend(sdk_command_variables(d))
+    return " ".join(variables)
+
+do_populate_sdk[vardeps] += "${@sdk_variables(d)}"
+
+do_populate_sdk[file-checksums] += "${COREBASE}/meta/files/toolchain-shar-relocate.sh:True \
+                                    ${COREBASE}/meta/files/toolchain-shar-extract.sh:True"
+
 do_populate_sdk[dirs] = "${PKGDATA_DIR} ${TOPDIR}"
 do_populate_sdk[depends] += "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_DEPENDS', True).split()])}  ${@d.getVarFlag('do_rootfs', 'depends', False)}"
 do_populate_sdk[rdepends] = "${@' '.join([x + ':do_populate_sysroot' for x in d.getVar('SDK_RDEPENDS', True).split()])}"
diff --git a/yocto-poky/meta/classes/populate_sdk_ext.bbclass b/yocto-poky/meta/classes/populate_sdk_ext.bbclass
index b9808bb..87518d1 100644
--- a/yocto-poky/meta/classes/populate_sdk_ext.bbclass
+++ b/yocto-poky/meta/classes/populate_sdk_ext.bbclass
@@ -15,12 +15,44 @@
 
 SDK_RELOCATE_AFTER_INSTALL_task-populate-sdk-ext = "0"
 
+SDK_EXT = ""
+SDK_EXT_task-populate-sdk-ext = "-ext"
+
+# Options are full or minimal
+SDK_EXT_TYPE ?= "full"
+
+SDK_RECRDEP_TASKS ?= ""
+
 SDK_LOCAL_CONF_WHITELIST ?= ""
-SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION BB_NUMBER_THREADS PARALLEL_MAKE PRSERV_HOST"
+SDK_LOCAL_CONF_BLACKLIST ?= "CONF_VERSION \
+                             BB_NUMBER_THREADS \
+                             PARALLEL_MAKE \
+                             PRSERV_HOST \
+                             SSTATE_MIRRORS \
+                            "
 SDK_INHERIT_BLACKLIST ?= "buildhistory icecc"
 SDK_UPDATE_URL ?= ""
 
 SDK_TARGETS ?= "${PN}"
+
+def get_sdk_install_targets(d):
+    sdk_install_targets = ''
+    if d.getVar('SDK_EXT_TYPE', True) != 'minimal':
+        sdk_install_targets = d.getVar('SDK_TARGETS', True)
+
+        depd = d.getVar('BB_TASKDEPDATA', False)
+        for v in depd.itervalues():
+            if v[1] == 'do_image_complete':
+                if v[0] not in sdk_install_targets:
+                    sdk_install_targets += ' {}'.format(v[0])
+
+    if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+        sdk_install_targets += ' meta-world-pkgdata:do_allpackagedata'
+
+    return sdk_install_targets
+
+get_sdk_install_targets[vardepsexclude] = "BB_TASKDEPDATA"
+
 OE_INIT_ENV_SCRIPT ?= "oe-init-build-env"
 
 # The files from COREBASE that you want preserved in the COREBASE copied
@@ -36,12 +68,18 @@
 
 SDK_DIR_task-populate-sdk-ext = "${WORKDIR}/sdk-ext"
 B_task-populate-sdk-ext = "${SDK_DIR}"
-TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+TOOLCHAINEXT_OUTPUTNAME = "${SDK_NAME}-toolchain-ext-${SDK_VERSION}"
+TOOLCHAIN_OUTPUTNAME_task-populate-sdk-ext = "${TOOLCHAINEXT_OUTPUTNAME}"
+
+SDK_EXT_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.target.manifest"
+SDK_EXT_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.host.manifest"
 
 SDK_TITLE_task-populate-sdk-ext = "${@d.getVar('DISTRO_NAME', True) or d.getVar('DISTRO', True)} Extensible SDK"
 
 python copy_buildsystem () {
     import re
+    import shutil
+    import glob
     import oe.copy_buildsystem
 
     oe_init_env_script = d.getVar('OE_INIT_ENV_SCRIPT', True)
@@ -53,7 +91,14 @@
     # Copy in all metadata layers + bitbake (as repositories)
     buildsystem = oe.copy_buildsystem.BuildSystem('extensible SDK', d)
     baseoutpath = d.getVar('SDK_OUTPUT', True) + '/' + d.getVar('SDKPATH', True)
-    layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers')
+
+    # Determine if we're building a derivative extensible SDK (from devtool build-sdk)
+    derivative = (d.getVar('SDK_DERIVATIVE', True) or '') == '1'
+    if derivative:
+        workspace_name = 'orig-workspace'
+    else:
+        workspace_name = None
+    layers_copied = buildsystem.copy_bitbake_and_layers(baseoutpath + '/layers', workspace_name)
 
     sdkbblayers = []
     corebase = os.path.basename(d.getVar('COREBASE', True))
@@ -95,6 +140,10 @@
     with open(os.path.join(baseoutpath, 'conf', 'devtool.conf'), 'w') as f:
         config.write(f)
 
+    unlockedsigs =  os.path.join(baseoutpath, 'conf', 'unlocked-sigs.inc')
+    with open(unlockedsigs, 'w') as f:
+        pass
+
     # Create a layer for new recipes / appends
     bbpath = d.getVar('BBPATH', True)
     bb.process.run(['devtool', '--bbpath', bbpath, '--basepath', baseoutpath, 'create-workspace', '--create-only', os.path.join(baseoutpath, 'workspace')])
@@ -107,7 +156,12 @@
         f.write('# this configuration provides, it is strongly suggested that you set\n')
         f.write('# up a proper instance of the full build system and use that instead.\n\n')
 
-        f.write('LCONF_VERSION = "%s"\n\n' % d.getVar('LCONF_VERSION', False))
+        # LCONF_VERSION may not be set, for example when using meta-poky
+        # so don't error if it isn't found
+        lconf_version = d.getVar('LCONF_VERSION', False)
+        if lconf_version is not None:
+            f.write('LCONF_VERSION = "%s"\n\n' % lconf_version)
+
         f.write('BBPATH = "$' + '{TOPDIR}"\n')
         f.write('SDKBASEMETAPATH = "$' + '{TOPDIR}"\n')
         f.write('BBLAYERS := " \\\n')
@@ -116,74 +170,180 @@
         f.write('    $' + '{SDKBASEMETAPATH}/workspace \\\n')
         f.write('    "\n')
 
+    env_whitelist = (d.getVar('BB_ENV_EXTRAWHITE', True) or '').split()
+    env_whitelist_values = {}
+
     # Create local.conf
-    local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST', True) or '').split()
-    local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST', True) or '').split()
-    def handle_var(varname, origvalue, op, newlines):
-        if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
-            newlines.append('# Removed original setting of %s\n' % varname)
-            return None, op, 0, True
-        else:
-            return origvalue, op, 0, True
-    varlist = ['[^#=+ ]*']
     builddir = d.getVar('TOPDIR', True)
-    with open(builddir + '/conf/local.conf', 'r') as f:
-        oldlines = f.readlines()
-    (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
+    if derivative:
+        shutil.copyfile(builddir + '/conf/local.conf', baseoutpath + '/conf/local.conf')
+    else:
+        local_conf_whitelist = (d.getVar('SDK_LOCAL_CONF_WHITELIST', True) or '').split()
+        local_conf_blacklist = (d.getVar('SDK_LOCAL_CONF_BLACKLIST', True) or '').split()
+        def handle_var(varname, origvalue, op, newlines):
+            if varname in local_conf_blacklist or (origvalue.strip().startswith('/') and not varname in local_conf_whitelist):
+                newlines.append('# Removed original setting of %s\n' % varname)
+                return None, op, 0, True
+            else:
+                if varname in env_whitelist:
+                    env_whitelist_values[varname] = origvalue
+                return origvalue, op, 0, True
+        varlist = ['[^#=+ ]*']
+        with open(builddir + '/conf/local.conf', 'r') as f:
+            oldlines = f.readlines()
+        (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
 
-    with open(baseoutpath + '/conf/local.conf', 'w') as f:
-        f.write('# WARNING: this configuration has been automatically generated and in\n')
-        f.write('# most cases should not be edited. If you need more flexibility than\n')
-        f.write('# this configuration provides, it is strongly suggested that you set\n')
-        f.write('# up a proper instance of the full build system and use that instead.\n\n')
-        for line in newlines:
-            if line.strip() and not line.startswith('#'):
+        with open(baseoutpath + '/conf/local.conf', 'w') as f:
+            f.write('# WARNING: this configuration has been automatically generated and in\n')
+            f.write('# most cases should not be edited. If you need more flexibility than\n')
+            f.write('# this configuration provides, it is strongly suggested that you set\n')
+            f.write('# up a proper instance of the full build system and use that instead.\n\n')
+            for line in newlines:
+                if line.strip() and not line.startswith('#'):
+                    f.write(line)
+            # Write a newline just in case there's none at the end of the original
+            f.write('\n')
+
+            f.write('INHERIT += "%s"\n\n' % 'uninative')
+            f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
+
+            # Some classes are not suitable for SDK, remove them from INHERIT
+            f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST', False))
+
+            # Bypass the default connectivity check if any
+            f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
+
+            # This warning will come out if reverse dependencies for a task
+            # don't have sstate as well as the task itself. We already know
+            # this will be the case for the extensible sdk, so turn off the
+            # warning.
+            f.write('SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK = "none"\n\n')
+
+            # Error if the sigs in the locked-signature file don't match
+            # the sig computed from the metadata.
+            f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "error"\n\n')
+
+            # Hide the config information from bitbake output (since it's fixed within the SDK)
+            f.write('BUILDCFG_HEADER = ""\n')
+
+            # Allow additional config through sdk-extra.conf
+            fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
+            if fn:
+                with open(fn, 'r') as xf:
+                    for line in xf:
+                        f.write(line)
+
+            # If you define a sdk_extraconf() function then it can contain additional config
+            # (Though this is awkward; sdk-extra.conf should probably be used instead)
+            extraconf = (d.getVar('sdk_extraconf', True) or '').strip()
+            if extraconf:
+                # Strip off any leading / trailing spaces
+                for line in extraconf.splitlines():
+                    f.write(line.strip() + '\n')
+
+            f.write('require conf/locked-sigs.inc\n')
+            f.write('require conf/unlocked-sigs.inc\n')
+
+    if os.path.exists(builddir + '/conf/auto.conf'):
+        if derivative:
+            shutil.copyfile(builddir + '/conf/auto.conf', baseoutpath + '/conf/auto.conf')
+        else:
+            with open(builddir + '/conf/auto.conf', 'r') as f:
+                oldlines = f.readlines()
+            (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var)
+            with open(baseoutpath + '/conf/auto.conf', 'w') as f:
+                f.write('# WARNING: this configuration has been automatically generated and in\n')
+                f.write('# most cases should not be edited. If you need more flexibility than\n')
+                f.write('# this configuration provides, it is strongly suggested that you set\n')
+                f.write('# up a proper instance of the full build system and use that instead.\n\n')
+                for line in newlines:
+                    if line.strip() and not line.startswith('#'):
+                        f.write(line)
+
+    # Ensure any variables set from the external environment (by way of
+    # BB_ENV_EXTRAWHITE) are set in the SDK's configuration
+    extralines = []
+    for name, value in env_whitelist_values.iteritems():
+        actualvalue = d.getVar(name, True) or ''
+        if value != actualvalue:
+            extralines.append('%s = "%s"\n' % (name, actualvalue))
+    if extralines:
+        with open(baseoutpath + '/conf/local.conf', 'a') as f:
+            f.write('\n')
+            f.write('# Extra settings from environment:\n')
+            for line in extralines:
                 f.write(line)
-
-        f.write('INHERIT += "%s"\n\n' % 'uninative')
-        f.write('CONF_VERSION = "%s"\n\n' % d.getVar('CONF_VERSION', False))
-
-        # Some classes are not suitable for SDK, remove them from INHERIT
-        f.write('INHERIT_remove = "%s"\n' % d.getVar('SDK_INHERIT_BLACKLIST'))
-
-        # Bypass the default connectivity check if any
-        f.write('CONNECTIVITY_CHECK_URIS = ""\n\n')
-
-        # Another hack, but we want the native part of sstate to be kept the same
-        # regardless of the host distro
-        fixedlsbstring = 'SDK-Fixed'
-        f.write('NATIVELSBSTRING_forcevariable = "%s"\n\n' % fixedlsbstring)
-
-        # Ensure locked sstate cache objects are re-used without error
-        f.write('SIGGEN_LOCKEDSIGS_CHECK_LEVEL = "none"\n\n')
-
-        # If you define a sdk_extraconf() function then it can contain additional config
-        extraconf = (d.getVar('sdk_extraconf', True) or '').strip()
-        if extraconf:
-            # Strip off any leading / trailing spaces
-            for line in extraconf.splitlines():
-                f.write(line.strip() + '\n')
-
-        f.write('require conf/locked-sigs.inc\n')
-
-    sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
-    oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+            f.write('\n')
 
     # Filter the locked signatures file to just the sstate tasks we are interested in
-    allowed_tasks = ['do_populate_lic', 'do_populate_sysroot', 'do_packagedata', 'do_package_write_ipk', 'do_package_write_rpm', 'do_package_write_deb', 'do_package_qa', 'do_deploy']
     excluded_targets = d.getVar('SDK_TARGETS', True)
+    sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
     lockedsigs_pruned = baseoutpath + '/conf/locked-sigs.inc'
-    oe.copy_buildsystem.prune_lockedsigs(allowed_tasks,
-                                         excluded_targets,
+    oe.copy_buildsystem.prune_lockedsigs([],
+                                         excluded_targets.split(),
                                          sigfile,
                                          lockedsigs_pruned)
 
     sstate_out = baseoutpath + '/sstate-cache'
     bb.utils.remove(sstate_out, True)
-    oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
-                                                   d.getVar('SSTATE_DIR', True),
-                                                   sstate_out, d,
-                                                   fixedlsbstring)
+    # uninative.bbclass sets NATIVELSBSTRING to 'universal'
+    fixedlsbstring = 'universal'
+
+    # Add packagedata if enabled
+    if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1':
+        lockedsigs_base = d.getVar('WORKDIR', True) + '/locked-sigs-base.inc'
+        lockedsigs_copy = d.getVar('WORKDIR', True) + '/locked-sigs-copy.inc'
+        shutil.move(lockedsigs_pruned, lockedsigs_base)
+        oe.copy_buildsystem.merge_lockedsigs(['do_packagedata'],
+                                             lockedsigs_base,
+                                             d.getVar('STAGING_DIR_HOST', True) + '/world-pkgdata/locked-sigs-pkgdata.inc',
+                                             lockedsigs_pruned,
+                                             lockedsigs_copy)
+
+    if d.getVar('SDK_EXT_TYPE', True) == 'minimal':
+        if derivative:
+            # Assume the user is not going to set up an additional sstate
+            # mirror, thus we need to copy the additional artifacts (from
+            # workspace recipes) into the derivative SDK
+            lockedsigs_orig = d.getVar('TOPDIR', True) + '/conf/locked-sigs.inc'
+            if os.path.exists(lockedsigs_orig):
+                lockedsigs_extra = d.getVar('WORKDIR', True) + '/locked-sigs-extra.inc'
+                oe.copy_buildsystem.merge_lockedsigs(None,
+                                                     lockedsigs_orig,
+                                                     lockedsigs_pruned,
+                                                     None,
+                                                     lockedsigs_extra)
+                oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_extra,
+                                                               d.getVar('SSTATE_DIR', True),
+                                                               sstate_out, d,
+                                                               fixedlsbstring)
+    else:
+        oe.copy_buildsystem.create_locked_sstate_cache(lockedsigs_pruned,
+                                                       d.getVar('SSTATE_DIR', True),
+                                                       sstate_out, d,
+                                                       fixedlsbstring)
+
+    # We don't need sstate do_package files
+    for root, dirs, files in os.walk(sstate_out):
+        for name in files:
+            if name.endswith("_package.tgz"):
+                f = os.path.join(root, name)
+                os.remove(f)
+
+    # Write manifest file
+    # Note: at the moment we cannot include the env setup script here to keep
+    # it updated, since it gets modified during SDK installation (see
+    # sdk_ext_postinst() below) thus the checksum we take here would always
+    # be different.
+    manifest_file_list = ['conf/*']
+    manifest_file = os.path.join(baseoutpath, 'conf', 'sdk-conf-manifest')
+    with open(manifest_file, 'w') as f:
+        for item in manifest_file_list:
+            for fn in glob.glob(os.path.join(baseoutpath, item)):
+                if fn == manifest_file:
+                    continue
+                chksum = bb.utils.sha256_file(fn)
+                f.write('%s\t%s\n' % (chksum, os.path.relpath(fn, baseoutpath)))
 }
 
 def extsdk_get_buildtools_filename(d):
@@ -195,14 +355,21 @@
 	lnr ${SDK_OUTPUT}/${SDKPATH}/${scriptrelpath}/recipetool ${SDK_OUTPUT}/${SDKPATHNATIVE}${bindir_nativesdk}/recipetool
 	touch ${SDK_OUTPUT}/${SDKPATH}/.devtoolbase
 
+	localconf=${SDK_OUTPUT}/${SDKPATH}/conf/local.conf
+
 	# find latest buildtools-tarball and install it
 	buildtools_path=`ls -t1 ${SDK_DEPLOY}/${@extsdk_get_buildtools_filename(d)} | head -n1`
 	install $buildtools_path ${SDK_OUTPUT}/${SDKPATH}
 
-	install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}
+	# For now this is where uninative.bbclass expects the tarball
+	chksum=`sha256sum ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 | cut -f 1 -d ' '`
+	install -d ${SDK_OUTPUT}/${SDKPATH}/downloads/uninative/$chksum/
+	install ${SDK_DEPLOY}/${BUILD_ARCH}-nativesdk-libc.tar.bz2 ${SDK_OUTPUT}/${SDKPATH}/downloads/uninative/$chksum/
+	echo "UNINATIVE_CHECKSUM[${BUILD_ARCH}] = '$chksum'" >> ${SDK_OUTPUT}/${SDKPATH}/conf/local.conf
 
-	install -m 0755 ${COREBASE}/meta/files/ext-sdk-prepare.sh ${SDK_OUTPUT}/${SDKPATH}
+	install -m 0644 ${COREBASE}/meta/files/ext-sdk-prepare.py ${SDK_OUTPUT}/${SDKPATH}
 }
+do_populate_sdk_ext[file-checksums] += "${COREBASE}/meta/files/ext-sdk-prepare.py:True"
 
 # Since bitbake won't run as root it doesn't make sense to try and install
 # the extensible sdk as root.
@@ -212,6 +379,12 @@
 		exit 1
 	fi
 	SDK_EXTENSIBLE="1"
+	if [ "$publish" = "1" ] ; then
+		EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=ext-sdk-prepare.py"
+		if [ "${SDK_EXT_TYPE}" = "minimal" ] ; then
+			EXTRA_TAR_OPTIONS="$EXTRA_TAR_OPTIONS --exclude=sstate-cache"
+		fi
+	fi
 }
 SDK_PRE_INSTALL_COMMAND_task-populate-sdk-ext = "${sdk_ext_preinst}"
 
@@ -221,6 +394,9 @@
 	cd $target_sdk_dir
 	printf "buildtools\ny" | ./*buildtools-nativesdk-standalone* > /dev/null || ( printf 'ERROR: buildtools installation failed\n' ; exit 1 )
 
+	# Delete the buildtools tar file since it won't be used again
+	rm ./*buildtools-nativesdk-standalone*.sh -f
+
 	# Make sure when the user sets up the environment, they also get
 	# the buildtools-tarball tools in their path.
 	env_setup_script="$target_sdk_dir/environment-setup-${REAL_MULTIMACH_TARGET_SYS}"
@@ -231,25 +407,22 @@
 
 	# A bit of another hack, but we need this in the path only for devtool
 	# so put it at the end of $PATH.
-	echo "export PATH=\$PATH:$target_sdk_dir/sysroots/${SDK_SYS}/${bindir_nativesdk}" >> $env_setup_script
+	echo "export PATH=$target_sdk_dir/sysroots/${SDK_SYS}${bindir_nativesdk}:\$PATH" >> $env_setup_script
 
 	echo "printf 'SDK environment now set up; additionally you may now run devtool to perform development tasks.\nRun devtool --help for further details.\n'" >> $env_setup_script
 
 	# Warn if trying to use external bitbake and the ext SDK together
 	echo "(which bitbake > /dev/null 2>&1 && echo 'WARNING: attempting to use the extensible SDK in an environment set up to run bitbake - this may lead to unexpected results. Please source this script in a new shell session instead.') || true" >> $env_setup_script
 
-	# For now this is where uninative.bbclass expects the tarball
-	mv *-nativesdk-libc.tar.* $target_sdk_dir/`dirname ${oe_init_build_env_path}`
-
 	if [ "$prepare_buildsystem" != "no" ]; then
 		printf "Preparing build system...\n"
 		# dash which is /bin/sh on Ubuntu will not preserve the
 		# current working directory when first ran, nor will it set $1 when
 		# sourcing a script. That is why this has to look so ugly.
 		LOGFILE="$target_sdk_dir/preparing_build_system.log"
-		sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && $target_sdk_dir/ext-sdk-prepare.sh $target_sdk_dir '${SDK_TARGETS}' >> $LOGFILE 2>&1" || { echo "ERROR: SDK preparation failed: see $LOGFILE"; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+		sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py '${SDK_INSTALL_TARGETS}' >> $LOGFILE 2>&1" || { echo "ERROR: SDK preparation failed: see $LOGFILE"; echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; }
+		rm $target_sdk_dir/ext-sdk-prepare.py
 	fi
-	rm -f $target_sdk_dir/ext-sdk-prepare.sh
 	echo done
 }
 
@@ -257,15 +430,37 @@
 
 SDK_POSTPROCESS_COMMAND_prepend_task-populate-sdk-ext = "copy_buildsystem; install_tools; "
 
+SDK_INSTALL_TARGETS = ""
 fakeroot python do_populate_sdk_ext() {
     # FIXME hopefully we can remove this restriction at some point, but uninative
     # currently forces this upon us
     if d.getVar('SDK_ARCH', True) != d.getVar('BUILD_ARCH', True):
         bb.fatal('The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is set to %s (likely via setting SDKMACHINE) which is different from the architecture of the build machine (%s). Unable to continue.' % (d.getVar('SDK_ARCH', True), d.getVar('BUILD_ARCH', True)))
 
+    d.setVar('SDK_INSTALL_TARGETS', get_sdk_install_targets(d))
+
     bb.build.exec_func("do_populate_sdk", d)
 }
 
+def get_ext_sdk_depends(d):
+    return d.getVarFlag('do_rootfs', 'depends', True) + ' ' + d.getVarFlag('do_build', 'depends', True)
+
+python do_sdk_depends() {
+    # We have to do this separately in its own task so we avoid recursing into
+    # dependencies we don't need to (e.g. buildtools-tarball) and bringing those
+    # into the SDK's sstate-cache
+    import oe.copy_buildsystem
+    sigfile = d.getVar('WORKDIR', True) + '/locked-sigs.inc'
+    oe.copy_buildsystem.generate_locked_sigs(sigfile, d)
+}
+addtask sdk_depends
+
+do_sdk_depends[dirs] = "${WORKDIR}"
+do_sdk_depends[depends] = "${@get_ext_sdk_depends(d)}"
+do_sdk_depends[recrdeptask] = "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
+do_sdk_depends[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy ${SDK_RECRDEP_TASKS}"
+do_sdk_depends[rdepends] = "${@get_sdk_ext_rdepends(d)}"
+
 def get_sdk_ext_rdepends(d):
     localdata = d.createCopy()
     localdata.appendVar('OVERRIDES', ':task-populate-sdk-ext')
@@ -273,17 +468,20 @@
     return localdata.getVarFlag('do_populate_sdk', 'rdepends', True)
 
 do_populate_sdk_ext[dirs] = "${@d.getVarFlag('do_populate_sdk', 'dirs', False)}"
-do_populate_sdk_ext[depends] += "${@d.getVarFlag('do_populate_sdk', 'depends', False)}"
-do_populate_sdk_ext[rdepends] = "${@get_sdk_ext_rdepends(d)}"
-do_populate_sdk_ext[recrdeptask] += "${@d.getVarFlag('do_populate_sdk', 'recrdeptask', False)}"
 
-
-do_populate_sdk_ext[depends] += "buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk"
+do_populate_sdk_ext[depends] = "${@d.getVarFlag('do_populate_sdk', 'depends', False)} \
+                                buildtools-tarball:do_populate_sdk uninative-tarball:do_populate_sdk \
+                                ${@'meta-world-pkgdata:do_collect_packagedata' if d.getVar('SDK_INCLUDE_PKGDATA', True) == '1' else ''}"
 
 do_populate_sdk_ext[rdepends] += "${@' '.join([x + ':do_build' for x in d.getVar('SDK_TARGETS', True).split()])}"
-do_populate_sdk_ext[recrdeptask] += "do_populate_lic do_package_qa do_populate_sysroot do_deploy"
 
-# Make sure codes change in copy_buildsystem can result in rebuilt
-do_populate_sdk_ext[vardeps] += "copy_buildsystem"
+# Make sure code changes can result in rebuild
+do_populate_sdk_ext[vardeps] += "copy_buildsystem \
+                                 sdk_ext_postinst"
 
-addtask populate_sdk_ext
+# Since any change in the metadata of any layer should cause a rebuild of the
+# sdk (since the layers are put in the sdk), set the task to nostamp so it
+# always runs.
+do_populate_sdk_ext[nostamp] = "1"
+
+addtask populate_sdk_ext after do_sdk_depends
diff --git a/yocto-poky/meta/classes/prexport.bbclass b/yocto-poky/meta/classes/prexport.bbclass
index 5a1cb33..809ec10 100644
--- a/yocto-poky/meta/classes/prexport.bbclass
+++ b/yocto-poky/meta/classes/prexport.bbclass
@@ -8,7 +8,8 @@
 
 python prexport_handler () {
     import bb.event
-    if not e.data:
+    if not e.data or bb.data.inherits_class('native', e.data) or \
+        bb.data.inherits_class('crosssdk', e.data):
         return
 
     if isinstance(e, bb.event.RecipeParsed):
@@ -21,7 +22,7 @@
             bb.fatal("prexport_handler: export failed!")
         (metainfo, datainfo) = retval
         if not datainfo:
-            bb.warn("prexport_handler: No AUTOPR values found for %s" % ver)
+            bb.note("prexport_handler: No AUTOPR values found for %s" % ver)
             return
         oe.prservice.prserv_export_tofile(e.data, None, datainfo, False)
         if 'AUTOINC' in ver:
diff --git a/yocto-poky/meta/classes/ptest-gnome.bbclass b/yocto-poky/meta/classes/ptest-gnome.bbclass
index b2949af..478a334 100644
--- a/yocto-poky/meta/classes/ptest-gnome.bbclass
+++ b/yocto-poky/meta/classes/ptest-gnome.bbclass
@@ -1,6 +1,6 @@
 inherit ptest
 
-EXTRA_OECONF_append_class-target = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
+EXTRA_OECONF_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '--enable-installed-tests', '--disable-installed-tests', d)}"
 
 FILES_${PN}-ptest += "${libexecdir}/installed-tests/ \
                       ${datadir}/installed-tests/"
diff --git a/yocto-poky/meta/classes/ptest.bbclass b/yocto-poky/meta/classes/ptest.bbclass
index 4dc5dbe..fa3561e 100644
--- a/yocto-poky/meta/classes/ptest.bbclass
+++ b/yocto-poky/meta/classes/ptest.bbclass
@@ -58,7 +58,7 @@
 
 python () {
     if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
-        d.setVarFlag('do_install_ptest_base', 'fakeroot', 1)
+        d.setVarFlag('do_install_ptest_base', 'fakeroot', '1')
 
     # Remove all '*ptest_base' tasks when ptest is not enabled
     if not(d.getVar('PTEST_ENABLED', True) == "1"):
diff --git a/yocto-poky/meta/classes/python3native.bbclass b/yocto-poky/meta/classes/python3native.bbclass
index cf83017..8ec6b76 100644
--- a/yocto-poky/meta/classes/python3native.bbclass
+++ b/yocto-poky/meta/classes/python3native.bbclass
@@ -1,4 +1,4 @@
-PYTHON_BASEVERSION = "3.4"
+PYTHON_BASEVERSION = "3.5"
 
 inherit python-dir
 
diff --git a/yocto-poky/meta/classes/qemu.bbclass b/yocto-poky/meta/classes/qemu.bbclass
index 601f587..75739db 100644
--- a/yocto-poky/meta/classes/qemu.bbclass
+++ b/yocto-poky/meta/classes/qemu.bbclass
@@ -13,7 +13,19 @@
         target_arch = "ppc64"
 
     return "qemu-" + target_arch
-#
+
+def qemu_wrapper_cmdline(data, rootfs_path, library_paths):
+    import string
+
+    qemu_binary = qemu_target_binary(data)
+    if qemu_binary == "qemu-allarch":
+        qemu_binary = "qemuwrapper"
+
+    qemu_options = data.getVar("QEMU_OPTIONS", True)
+
+    return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
+            + " -E LD_LIBRARY_PATH=" + ":".join(library_paths) + " "
+
 # Next function will return a string containing the command that is needed to
 # run a certain binary through qemu. For example, in order to make a certain
 # postinstall scriptlet run at do_rootfs time and running the postinstall is
@@ -23,29 +35,25 @@
 # ${@qemu_run_binary(d, '$D', '/usr/bin/test_app')} [test_app arguments]
 #
 def qemu_run_binary(data, rootfs_path, binary):
-    qemu_binary = qemu_target_binary(data)
-    if qemu_binary == "qemu-allarch":
-        qemu_binary = "qemuwrapper"
-
     libdir = rootfs_path + data.getVar("libdir", False)
     base_libdir = rootfs_path + data.getVar("base_libdir", False)
-    qemu_options = data.getVar("QEMU_OPTIONS", True)
 
-    return "PSEUDO_UNLOAD=1 " + qemu_binary + " " + qemu_options + " -L " + rootfs_path\
-            + " -E LD_LIBRARY_PATH=" + libdir + ":" + base_libdir + " "\
-            + rootfs_path + binary
+    return qemu_wrapper_cmdline(data, rootfs_path, [libdir, base_libdir]) + rootfs_path + binary
 
-# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are 
-# PACKAGE_ARCH, not overrides and hence have to do this dance. Simply being arch 
-# specific isn't good enough.
+# QEMU_EXTRAOPTIONS is not meant to be directly used, the extensions are
+# PACKAGE_ARCH, *NOT* overrides.
+# In some cases (e.g. ppc) simply being arch specific (apparently) isn't good
+# enough and a PACKAGE_ARCH specific -cpu option is needed (hence we have to do
+# this dance). For others (e.g. arm) a -cpu option is not necessary, since the
+# qemu-arm default CPU supports all required architecture levels.
+
 QEMU_OPTIONS = "-r ${OLDEST_KERNEL} ${@d.getVar("QEMU_EXTRAOPTIONS_%s" % d.getVar('PACKAGE_ARCH', True), True) or ""}"
-QEMU_EXTRAOPTIONS_iwmmxt    = " -cpu pxa270-c5"
-QEMU_EXTRAOPTIONS_armv6     = " -cpu arm1136"
-QEMU_EXTRAOPTIONS_armv7a    = " -cpu cortex-a8"
-QEMU_EXTRAOPTIONS_e500v2    = " -cpu e500v2"
-QEMU_EXTRAOPTIONS_e500mc    = " -cpu e500mc"
-QEMU_EXTRAOPTIONS_e5500     = " -cpu e5500"
-QEMU_EXTRAOPTIONS_e5500-64b = " -cpu e5500"
-QEMU_EXTRAOPTIONS_e6500     = " -cpu e6500"
-QEMU_EXTRAOPTIONS_e6500-64b = " -cpu e6500"
-QEMU_EXTRAOPTIONS_ppc7400   = " -cpu 7400"
+QEMU_OPTIONS[vardeps] += "QEMU_EXTRAOPTIONS_${PACKAGE_ARCH}"
+
+QEMU_EXTRAOPTIONS_ppce500v2 = " -cpu e500v2"
+QEMU_EXTRAOPTIONS_ppce500mc = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppce5500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppc64e5500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppce6500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppc64e6500 = " -cpu e500mc"
+QEMU_EXTRAOPTIONS_ppc7400 = " -cpu 7400"
diff --git a/yocto-poky/meta/classes/qmake2.bbclass b/yocto-poky/meta/classes/qmake2.bbclass
deleted file mode 100644
index 6e73ad2..0000000
--- a/yocto-poky/meta/classes/qmake2.bbclass
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# QMake variables for Qt4
-#
-inherit qmake_base
-
-DEPENDS_prepend = "qt4-tools-native "
-
-export QMAKESPEC = "${STAGING_DATADIR}/qt4/mkspecs/${TARGET_OS}-oe-g++"
-export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/qt4/mkspecs/qconfig.pri"
-export OE_QMAKE_UIC = "${STAGING_BINDIR_NATIVE}/uic4"
-export OE_QMAKE_UIC3 = "${STAGING_BINDIR_NATIVE}/uic34"
-export OE_QMAKE_MOC = "${STAGING_BINDIR_NATIVE}/moc4"
-export OE_QMAKE_RCC = "${STAGING_BINDIR_NATIVE}/rcc4"
-export OE_QMAKE_QDBUSCPP2XML = "${STAGING_BINDIR_NATIVE}/qdbuscpp2xml4"
-export OE_QMAKE_QDBUSXML2CPP = "${STAGING_BINDIR_NATIVE}/qdbusxml2cpp4"
-export OE_QMAKE_QMAKE = "${STAGING_BINDIR_NATIVE}/qmake2"
-export OE_QMAKE_LINK = "${CXX}"
-export OE_QMAKE_CXXFLAGS = "${CXXFLAGS}"
-export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/qt4"
-export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
-export OE_QMAKE_LIBS_QT = "qt"
-export OE_QMAKE_LIBS_X11 = "-lXext -lX11 -lm"
-export OE_QMAKE_LIBS_X11SM = "-lSM -lICE"
-export OE_QMAKE_LCONVERT = "${STAGING_BINDIR_NATIVE}/lconvert4"
-export OE_QMAKE_LRELEASE = "${STAGING_BINDIR_NATIVE}/lrelease4"
-export OE_QMAKE_LUPDATE = "${STAGING_BINDIR_NATIVE}/lupdate4"
-export OE_QMAKE_XMLPATTERNS = "${STAGING_BINDIR_NATIVE}/xmlpatterns4"
diff --git a/yocto-poky/meta/classes/qmake_base.bbclass b/yocto-poky/meta/classes/qmake_base.bbclass
deleted file mode 100644
index dc98713..0000000
--- a/yocto-poky/meta/classes/qmake_base.bbclass
+++ /dev/null
@@ -1,119 +0,0 @@
-QMAKE_MKSPEC_PATH ?= "${STAGING_DATADIR_NATIVE}/qmake"
-
-OE_QMAKE_PLATFORM = "${TARGET_OS}-oe-g++"
-QMAKESPEC := "${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}"
-
-# We override this completely to eliminate the -e normally passed in
-EXTRA_OEMAKE = ""
-
-export OE_QMAKE_CC="${CC}"
-export OE_QMAKE_CFLAGS="${CFLAGS}"
-export OE_QMAKE_CXX="${CXX}"
-export OE_QMAKE_LDFLAGS="${LDFLAGS}"
-export OE_QMAKE_AR="${AR}"
-export OE_QMAKE_STRIP="echo"
-export OE_QMAKE_RPATH="-Wl,-rpath-link,"
-
-# default to qte2 via bb.conf, inherit qt3x11 to configure for qt3x11
-
-oe_qmake_mkspecs () {
-    mkdir -p mkspecs/${OE_QMAKE_PLATFORM}
-    for f in ${QMAKE_MKSPEC_PATH}/${OE_QMAKE_PLATFORM}/*; do
-        if [ -L $f ]; then
-            lnk=`readlink $f`
-            if [ -f mkspecs/${OE_QMAKE_PLATFORM}/$lnk ]; then
-                ln -s $lnk mkspecs/${OE_QMAKE_PLATFORM}/`basename $f`
-            else
-                cp $f mkspecs/${OE_QMAKE_PLATFORM}/
-            fi
-        else
-            cp $f mkspecs/${OE_QMAKE_PLATFORM}/
-        fi
-    done
-}
-
-do_generate_qt_config_file() {
-	export QT_CONF_PATH=${WORKDIR}/qt.conf
-	cat > ${WORKDIR}/qt.conf <<EOF
-[Paths]
-Prefix =
-Binaries = ${STAGING_BINDIR_NATIVE}
-Headers = ${STAGING_INCDIR}/${QT_DIR_NAME}
-Plugins = ${STAGING_LIBDIR}/${QT_DIR_NAME}/plugins/
-Mkspecs = ${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/
-EOF
-}
-
-addtask generate_qt_config_file after do_patch before do_configure
-
-qmake_base_do_configure() {
-	case ${QMAKESPEC} in
-	*linux-oe-g++|*linux-uclibc-oe-g++|*linux-gnueabi-oe-g++|*linux-uclibceabi-oe-g++|*linux-gnuspe-oe-g++|*linux-uclibcspe-oe-g++|*linux-gnun32-oe-g++)
-		;;
-	*-oe-g++)
-		die Unsupported target ${TARGET_OS} for oe-g++ qmake spec
-		;;
-	*)
-		bbnote Searching for qmake spec file
-		paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-${TARGET_ARCH}-g++"
-		paths="${QMAKE_MKSPEC_PATH}/${TARGET_OS}-g++ $paths"
-
-		if (echo "${TARGET_ARCH}"|grep -q 'i.86'); then
-			paths="${QMAKE_MKSPEC_PATH}/qws/${TARGET_OS}-x86-g++ $paths"
-		fi
-		for i in $paths; do
-			if test -e $i; then
-				export QMAKESPEC=$i
-				break
-			fi
-		done
-		;;
-	esac
-
-	bbnote "using qmake spec in ${QMAKESPEC}, using profiles '${QMAKE_PROFILES}'"
-
-	if [ -z "${QMAKE_PROFILES}" ]; then 
-		PROFILES="`ls *.pro`"
-	else
-		PROFILES="${QMAKE_PROFILES}"
-	fi
-
-	if [ -z "$PROFILES" ]; then
-		die "QMAKE_PROFILES not set and no profiles found in $PWD"
-        fi
-
-	if [ ! -z "${EXTRA_QMAKEVARS_POST}" ]; then
-		AFTER="-after"
-		QMAKE_VARSUBST_POST="${EXTRA_QMAKEVARS_POST}"
-		bbnote "qmake postvar substitution: ${EXTRA_QMAKEVARS_POST}"
-	fi
-
-	if [ ! -z "${EXTRA_QMAKEVARS_PRE}" ]; then
-		QMAKE_VARSUBST_PRE="${EXTRA_QMAKEVARS_PRE}"
-		bbnote "qmake prevar substitution: ${EXTRA_QMAKEVARS_PRE}"
-	fi
-
-	# Hack .pro files to use OE utilities
-	LCONVERT_NAME=$(basename ${OE_QMAKE_LCONVERT})
-	LRELEASE_NAME=$(basename ${OE_QMAKE_LRELEASE})
-	LUPDATE_NAME=$(basename ${OE_QMAKE_LUPDATE})
-	XMLPATTERNS_NAME=$(basename ${OE_QMAKE_XMLPATTERNS})
-	find -name '*.pro' \
-	     -exec sed -i -e "s|\(=\s*.*\)/$LCONVERT_NAME|\1/lconvert|g" \
-	                  -e "s|\(=\s*.*\)/$LRELEASE_NAME|\1/lrelease|g" \
-	                  -e "s|\(=\s*.*\)/$LUPDATE_NAME|\1/lupdate|g" \
-	                  -e "s|\(=\s*.*\)/$XMLPATTERNS_NAME|\1/xmlpatterns|g" \
-	                  -e "s|\(=\s*.*\)/lconvert|\1/$LCONVERT_NAME|g" \
-	                  -e "s|\(=\s*.*\)/lrelease|\1/$LRELEASE_NAME|g" \
-	                  -e "s|\(=\s*.*\)/lupdate|\1/$LUPDATE_NAME|g" \
-	                  -e "s|\(=\s*.*\)/xmlpatterns|\1/$XMLPATTERNS_NAME|g" \
-	                  '{}' ';'
-
-#bbnote "Calling '${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST'"
-	unset QMAKESPEC || true
-	${OE_QMAKE_QMAKE} -makefile -spec ${QMAKESPEC} -o Makefile $QMAKE_VARSUBST_PRE $AFTER $PROFILES $QMAKE_VARSUBST_POST || die "Error calling ${OE_QMAKE_QMAKE} on $PROFILES"
-}
-
-EXPORT_FUNCTIONS do_configure
-
-addtask configure after do_unpack do_patch before do_compile
diff --git a/yocto-poky/meta/classes/qt4e.bbclass b/yocto-poky/meta/classes/qt4e.bbclass
deleted file mode 100644
index 13b1050..0000000
--- a/yocto-poky/meta/classes/qt4e.bbclass
+++ /dev/null
@@ -1,21 +0,0 @@
-QT4EDEPENDS ?= "qt4-embedded "
-DEPENDS_prepend = "${QT4EDEPENDS}"
-
-inherit qmake2
-
-QT_BASE_NAME = "qt4-embedded"
-QT_DIR_NAME = "qtopia"
-QT_LIBINFIX = "E"
-# override variables set by qmake-base to compile Qt/Embedded apps
-#
-export QMAKESPEC = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/${TARGET_OS}-oe-g++"
-export OE_QMAKE_QT_CONFIG = "${STAGING_DATADIR}/${QT_DIR_NAME}/mkspecs/qconfig.pri"
-export OE_QMAKE_INCDIR_QT = "${STAGING_INCDIR}/${QT_DIR_NAME}"
-export OE_QMAKE_LIBDIR_QT = "${STAGING_LIBDIR}"
-export OE_QMAKE_LIBS_QT = "qt"
-export OE_QMAKE_LIBS_X11 = ""
-export OE_QMAKE_EXTRA_MODULES = "network"
-EXTRA_QMAKEVARS_PRE += " QT_LIBINFIX=${QT_LIBINFIX} "
-
-# Qt4 uses atomic instructions not supported in thumb mode
-ARM_INSTRUCTION_SET = "arm"
diff --git a/yocto-poky/meta/classes/qt4x11.bbclass b/yocto-poky/meta/classes/qt4x11.bbclass
deleted file mode 100644
index 6f06d34..0000000
--- a/yocto-poky/meta/classes/qt4x11.bbclass
+++ /dev/null
@@ -1,14 +0,0 @@
-QT4DEPENDS ?= "qt4-x11 "
-DEPENDS_prepend = "${QT4DEPENDS}"
-
-# depends on qt4-x11
-REQUIRED_DISTRO_FEATURES += "x11"
-
-inherit qmake2 distro_features_check
-
-QT_BASE_NAME = "qt4"
-QT_DIR_NAME = "qt4"
-QT_LIBINFIX = ""
-
-# Qt4 uses atomic instructions not supported in thumb mode
-ARM_INSTRUCTION_SET = "arm"
diff --git a/yocto-poky/meta/classes/recipe_sanity.bbclass b/yocto-poky/meta/classes/recipe_sanity.bbclass
index ee04e33..295611f 100644
--- a/yocto-poky/meta/classes/recipe_sanity.bbclass
+++ b/yocto-poky/meta/classes/recipe_sanity.bbclass
@@ -143,9 +143,8 @@
 
     cfgdata = {}
     for k in d.keys():
-    #for k in ["S", "PR", "PV", "PN", "DESCRIPTION", "LICENSE", "DEPENDS",
-    #          "SECTION"]:
-        cfgdata[k] = d.getVar(k, 0)
+        if not isinstance(d.getVar(k, 0), bb.data_smart.DataSmart):
+            cfgdata[k] = d.getVar(k, 0)
 
     d.setVar("__recipe_sanity_cfgdata", cfgdata)
     #d.setVar("__recipe_sanity_cfgdata", d)
diff --git a/yocto-poky/meta/classes/remove-libtool.bbclass b/yocto-poky/meta/classes/remove-libtool.bbclass
new file mode 100644
index 0000000..3fd0cd5
--- /dev/null
+++ b/yocto-poky/meta/classes/remove-libtool.bbclass
@@ -0,0 +1,11 @@
+# This class removes libtool .la files after do_install
+
+REMOVE_LIBTOOL_LA ?= "1"
+
+remove_libtool_la() {
+	if [ "${REMOVE_LIBTOOL_LA}" != "0" ]; then
+		find "${D}" -ignore_readdir_race -name "*.la" -delete
+	fi
+}
+
+do_install[postfuncs] += "remove_libtool_la"
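
For reference, the class is controlled by a single switch: a recipe that needs to keep its .la files can inherit it but opt out. A minimal sketch (the recipe context is hypothetical):

    # Hypothetical recipe fragment: keep the libtool archives for this one recipe
    inherit remove-libtool
    REMOVE_LIBTOOL_LA = "0"
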
diff --git a/yocto-poky/meta/classes/rm_work.bbclass b/yocto-poky/meta/classes/rm_work.bbclass
index 5e9efc1..c647d88 100644
--- a/yocto-poky/meta/classes/rm_work.bbclass
+++ b/yocto-poky/meta/classes/rm_work.bbclass
@@ -63,6 +63,14 @@
                 i=dummy
                 break
                 ;;
+            *do_rootfs*)
+               i=dummy
+               break
+               ;;
+            *do_image*)
+               i=dummy
+               break
+               ;;
             *do_build*)
                 i=dummy
                 break
@@ -104,7 +112,7 @@
 }
 rm_work_populatesdk[cleandirs] = "${WORKDIR}/sdk"
 
-do_rootfs[postfuncs] += "rm_work_rootfs"
+do_image_complete[postfuncs] += "rm_work_rootfs"
 rm_work_rootfs () {
     :
 }
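
As a usage reminder, rm_work is normally enabled globally from local.conf, and individual recipes can be kept via RM_WORK_EXCLUDE; a minimal sketch (the excluded recipe name is illustrative):

    # local.conf sketch: reclaim disk space, but keep one workdir for debugging
    INHERIT += "rm_work"
    RM_WORK_EXCLUDE += "core-image-minimal"
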
diff --git a/yocto-poky/meta/classes/rootfs-postcommands.bbclass b/yocto-poky/meta/classes/rootfs-postcommands.bbclass
new file mode 100644
index 0000000..95d28af
--- /dev/null
+++ b/yocto-poky/meta/classes/rootfs-postcommands.bbclass
@@ -0,0 +1,277 @@
+
+# Zap the root password if debug-tweaks feature is not enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}'
+
+# Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}'
+
+# Enable postinst logging if debug-tweaks is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}'
+
+# Create /etc/timestamp during image construction to give a reasonably sane default time setting
+ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; "
+
+# Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}'
+
+# Write manifest
+IMAGE_MANIFEST = "${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest"
+ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
+# Set default postinst log file
+POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
+# Set default target for systemd images
+SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
+ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
+
+ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
+
+# Disable DNS lookups; the SSH_DISABLE_DNS_LOOKUP variable can be overridden
+# to allow distros to opt out of this change
+SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
+ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
+
+
+
+#
+# A hook function to support read-only-rootfs IMAGE_FEATURES
+#
+read_only_rootfs_hook () {
+	# Tweak the mount option and fs_passno for rootfs in fstab
+	sed -i -e '/^[#[:space:]]*\/dev\/root/{s/defaults/ro/;s/\([[:space:]]*[[:digit:]]\)\([[:space:]]*\)[[:digit:]]$/\1\20/}' ${IMAGE_ROOTFS}/etc/fstab
+
+	# If we're using openssh and the /etc/ssh directory has no pre-generated keys,
+	# we should configure openssh to use the configuration file /etc/ssh/sshd_config_readonly
+	# and the keys under /var/run/ssh.
+	if [ -d ${IMAGE_ROOTFS}/etc/ssh ]; then
+		if [ -e ${IMAGE_ROOTFS}/etc/ssh/ssh_host_rsa_key ]; then
+			echo "SYSCONFDIR=/etc/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+			echo "SSHD_OPTS=" >> ${IMAGE_ROOTFS}/etc/default/ssh
+		else
+			echo "SYSCONFDIR=/var/run/ssh" >> ${IMAGE_ROOTFS}/etc/default/ssh
+			echo "SSHD_OPTS='-f /etc/ssh/sshd_config_readonly'" >> ${IMAGE_ROOTFS}/etc/default/ssh
+		fi
+	fi
+
+	# Also tweak the key location for dropbear in the same way.
+	if [ -d ${IMAGE_ROOTFS}/etc/dropbear ]; then
+		if [ -e ${IMAGE_ROOTFS}/etc/dropbear/dropbear_rsa_host_key ]; then
+			echo "DROPBEAR_RSAKEY_DIR=/etc/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+		else
+			echo "DROPBEAR_RSAKEY_DIR=/var/lib/dropbear" >> ${IMAGE_ROOTFS}/etc/default/dropbear
+		fi
+	fi
+
+
+	if ${@bb.utils.contains("DISTRO_FEATURES", "sysvinit", "true", "false", d)}; then
+		# Change the value of ROOTFS_READ_ONLY in /etc/default/rcS to yes
+		if [ -e ${IMAGE_ROOTFS}/etc/default/rcS ]; then
+			sed -i 's/ROOTFS_READ_ONLY=no/ROOTFS_READ_ONLY=yes/' ${IMAGE_ROOTFS}/etc/default/rcS
+		fi
+		# Run populate-volatile.sh at rootfs time to set up basic files
+		# and directories to support read-only rootfs.
+		if [ -x ${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh ]; then
+			${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
+		fi
+	fi
+
+	if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
+	    # Update user database files so that services don't fail for a read-only systemd system
+	    for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
+		[ -e $conffile ] || continue
+		grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
+		    if [ "$type" = "u" ]; then
+			useradd_params=""
+			[ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
+			[ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
+			useradd_params="$useradd_params --system $name"
+			eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
+		    elif [ "$type" = "g" ]; then
+			groupadd_params=""
+			[ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
+			groupadd_params="$groupadd_params --system $name"
+			eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
+		    fi
+		done
+	    done
+	fi
+}
+
+#
+# This function is intended to disallow empty root password if 'debug-tweaks' is not in IMAGE_FEATURES.
+#
+zap_empty_root_password () {
+	if [ -e ${IMAGE_ROOTFS}/etc/shadow ]; then
+		sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/shadow
+        fi
+	if [ -e ${IMAGE_ROOTFS}/etc/passwd ]; then
+		sed -i 's%^root::%root:*:%' ${IMAGE_ROOTFS}/etc/passwd
+	fi
+} 
+
+#
+# allow dropbear/openssh to accept root logins and logins from accounts with an empty password string
+#
+ssh_allow_empty_password () {
+	for config in sshd_config sshd_config_readonly; do
+		if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config ]; then
+			sed -i 's/^[#[:space:]]*PermitRootLogin.*/PermitRootLogin yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
+			sed -i 's/^[#[:space:]]*PermitEmptyPasswords.*/PermitEmptyPasswords yes/' ${IMAGE_ROOTFS}${sysconfdir}/ssh/$config
+		fi
+	done
+
+	if [ -e ${IMAGE_ROOTFS}${sbindir}/dropbear ] ; then
+		if grep -q DROPBEAR_EXTRA_ARGS ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear 2>/dev/null ; then
+			if ! grep -q "DROPBEAR_EXTRA_ARGS=.*-B" ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear ; then
+				sed -i 's/^DROPBEAR_EXTRA_ARGS="*\([^"]*\)"*/DROPBEAR_EXTRA_ARGS="\1 -B"/' ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+			fi
+		else
+			printf '\nDROPBEAR_EXTRA_ARGS="-B"\n' >> ${IMAGE_ROOTFS}${sysconfdir}/default/dropbear
+		fi
+	fi
+
+	if [ -d ${IMAGE_ROOTFS}${sysconfdir}/pam.d ] ; then
+		sed -i 's/nullok_secure/nullok/' ${IMAGE_ROOTFS}${sysconfdir}/pam.d/*
+	fi
+}
+
+ssh_disable_dns_lookup () {
+	if [ -e ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config ]; then
+		sed -i -e 's:#UseDNS yes:UseDNS no:' ${IMAGE_ROOTFS}${sysconfdir}/ssh/sshd_config
+	fi
+}
+
+#
+# Enable postinst logging if debug-tweaks is enabled
+#
+postinst_enable_logging () {
+	mkdir -p ${IMAGE_ROOTFS}${sysconfdir}/default
+	echo "POSTINST_LOGGING=1" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
+	echo "LOGFILE=${POSTINST_LOGFILE}" >> ${IMAGE_ROOTFS}${sysconfdir}/default/postinst
+}
+
+#
+# Modify systemd default target
+#
+set_systemd_default_target () {
+	if [ -d ${IMAGE_ROOTFS}${sysconfdir}/systemd/system -a -e ${IMAGE_ROOTFS}${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ]; then
+		ln -sf ${systemd_unitdir}/system/${SYSTEMD_DEFAULT_TARGET} ${IMAGE_ROOTFS}${sysconfdir}/systemd/system/default.target
+	fi
+}
+
+# If /var/volatile is not empty, we have seen problems where programs such as the
+# journal make assumptions based on the contents of /var/volatile. The journal
+# would then write to /var/volatile before it was mounted, thus hiding the
+# items previously written.
+#
+# This change is to attempt to fix those types of issues in a way that doesn't
+# affect users that may not be using /var/volatile.
+empty_var_volatile () {
+	if [ -e ${IMAGE_ROOTFS}/etc/fstab ]; then
+		match=`awk '$1 !~ "#" && $2 ~ /\/var\/volatile/{print $2}' ${IMAGE_ROOTFS}/etc/fstab 2> /dev/null`
+		if [ -n "$match" ]; then
+			find ${IMAGE_ROOTFS}/var/volatile -mindepth 1 -delete
+		fi
+	fi
+}
+
+# Turn any symbolic /sbin/init link into a file
+remove_init_link () {
+	if [ -h ${IMAGE_ROOTFS}/sbin/init ]; then
+		LINKFILE=${IMAGE_ROOTFS}`readlink ${IMAGE_ROOTFS}/sbin/init`
+		rm ${IMAGE_ROOTFS}/sbin/init
+		cp $LINKFILE ${IMAGE_ROOTFS}/sbin/init
+	fi
+}
+
+make_zimage_symlink_relative () {
+	if [ -L ${IMAGE_ROOTFS}/boot/zImage ]; then
+		(cd ${IMAGE_ROOTFS}/boot/ && for i in `ls zImage-* | sort`; do ln -sf $i zImage; done)
+	fi
+}
+
+insert_feed_uris () {
+	
+	echo "Building feeds for [${DISTRO}].."
+
+	for line in ${FEED_URIS}
+	do
+		# strip leading and trailing spaces/tabs, then split into name and uri
+		line_clean="`echo "$line"|sed 's/^[ \t]*//;s/[ \t]*$//'`"
+		feed_name="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\1/p'`"
+		feed_uri="`echo "$line_clean" | sed -n 's/\(.*\)##\(.*\)/\2/p'`"
+		
+		echo "Added $feed_name feed with URL $feed_uri"
+		
+		# insert new feed-sources
+		echo "src/gz $feed_name $feed_uri" >> ${IMAGE_ROOTFS}/etc/opkg/${feed_name}-feed.conf
+	done
+}
+
+python write_image_manifest () {
+    from oe.rootfs import image_list_installed_packages
+    from oe.utils import format_pkg_list
+
+    deploy_dir = d.getVar('DEPLOY_DIR_IMAGE', True)
+    link_name = d.getVar('IMAGE_LINK_NAME', True)
+    manifest_name = d.getVar('IMAGE_MANIFEST', True)
+
+    if not manifest_name:
+        return
+
+    pkgs = image_list_installed_packages(d)
+    with open(manifest_name, 'w+') as image_manifest:
+        image_manifest.write(format_pkg_list(pkgs, "ver"))
+        image_manifest.write("\n")
+
+    if os.path.exists(manifest_name):
+        manifest_link = deploy_dir + "/" + link_name + ".manifest"
+        if os.path.lexists(manifest_link):
+            if d.getVar('RM_OLD_IMAGE', True) == "1" and \
+                    os.path.exists(os.path.realpath(manifest_link)):
+                os.remove(os.path.realpath(manifest_link))
+            os.remove(manifest_link)
+        os.symlink(os.path.basename(manifest_name), manifest_link)
+}
+
+# Can be used to create /etc/timestamp during image construction to give a reasonably
+# sane default time setting
+rootfs_update_timestamp () {
+	date -u +%4Y%2m%2d%2H%2M%2S >${IMAGE_ROOTFS}/etc/timestamp
+}
+
+# Prevent X from being started
+rootfs_no_x_startup () {
+	if [ -f ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm ]; then
+		chmod a-x ${IMAGE_ROOTFS}/etc/init.d/xserver-nodm
+	fi
+}
+
+rootfs_trim_schemas () {
+	for schema in ${IMAGE_ROOTFS}/etc/gconf/schemas/*.schemas
+	do
+		# Need this in case no files exist
+		if [ -e $schema ]; then
+			oe-trim-schemas $schema > $schema.new
+			mv $schema.new $schema
+		fi
+	done
+}
+
+rootfs_check_host_user_contaminated () {
+	contaminated="${WORKDIR}/host-user-contaminated.txt"
+	HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)"
+	HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)"
+
+	find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \
+	    -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated"
+
+	if [ -s "$contaminated" ]; then
+		echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths."
+		cat "$contaminated" | sed "s,^,  ,"
+	fi
+}
+
+# Make any absolute links in a sysroot relative
+rootfs_sysroot_relativelinks () {
+	sysroot-relativelinks.py ${SDK_OUTPUT}/${SDKTARGETSYSROOT}
+}
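
All of the hooks above are wired in through ROOTFS_POSTPROCESS_COMMAND, and an image recipe can append its own function in the same way. A minimal sketch, with a hypothetical function name and target file:

    # Hypothetical image recipe fragment: record the distro version in the rootfs
    ROOTFS_POSTPROCESS_COMMAND += "write_build_id ; "

    write_build_id () {
        echo "${DISTRO_VERSION}" > ${IMAGE_ROOTFS}${sysconfdir}/build-id
    }
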
diff --git a/yocto-poky/meta/classes/rootfs_deb.bbclass b/yocto-poky/meta/classes/rootfs_deb.bbclass
index d51b458..f79fca6 100644
--- a/yocto-poky/meta/classes/rootfs_deb.bbclass
+++ b/yocto-poky/meta/classes/rootfs_deb.bbclass
@@ -8,7 +8,6 @@
 do_rootfs[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot"
 do_populate_sdk[depends] += "dpkg-native:do_populate_sysroot apt-native:do_populate_sysroot bzip2-native:do_populate_sysroot"
 do_rootfs[recrdeptask] += "do_package_write_deb"
-rootfs_deb_do_rootfs[vardepsexclude] += "BUILDNAME"
 do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
 
 do_rootfs[lockfiles] += "${DEPLOY_DIR_DEB}/deb.lock"
diff --git a/yocto-poky/meta/classes/rootfs_ipk.bbclass b/yocto-poky/meta/classes/rootfs_ipk.bbclass
index dd144e4..d5c38fe 100644
--- a/yocto-poky/meta/classes/rootfs_ipk.bbclass
+++ b/yocto-poky/meta/classes/rootfs_ipk.bbclass
@@ -13,7 +13,6 @@
 do_populate_sdk[depends] += "opkg-native:do_populate_sysroot opkg-utils-native:do_populate_sysroot"
 do_rootfs[recrdeptask] += "do_package_write_ipk"
 do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
-rootfs_ipk_do_rootfs[vardepsexclude] += "BUILDNAME"
 
 do_rootfs[lockfiles] += "${WORKDIR}/ipk.lock"
 do_populate_sdk[lockfiles] += "${WORKDIR}/ipk.lock"
@@ -29,7 +28,7 @@
 python () {
 
     if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
-        flags = d.getVarFlag('do_rootfs', 'recrdeptask')
+        flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
         flags = flags.replace("do_package_write_ipk", "")
         flags = flags.replace("do_deploy", "")
         flags = flags.replace("do_populate_sysroot", "")
diff --git a/yocto-poky/meta/classes/rootfs_rpm.bbclass b/yocto-poky/meta/classes/rootfs_rpm.bbclass
index d85d001..0d2e897 100644
--- a/yocto-poky/meta/classes/rootfs_rpm.bbclass
+++ b/yocto-poky/meta/classes/rootfs_rpm.bbclass
@@ -22,7 +22,6 @@
 do_populate_sdk[depends] += "${RPMROOTFSDEPENDS}"
 
 do_rootfs[recrdeptask] += "do_package_write_rpm"
-rootfs_rpm_do_rootfs[vardepsexclude] += "BUILDNAME"
 do_rootfs[vardeps] += "PACKAGE_FEED_URIS"
 
 # RPM doesn't work with multiple rootfs generation at once due to collisions in the use of files 
@@ -32,7 +31,7 @@
 
 python () {
     if d.getVar('BUILD_IMAGES_FROM_FEEDS', True):
-        flags = d.getVarFlag('do_rootfs', 'recrdeptask')
+        flags = d.getVarFlag('do_rootfs', 'recrdeptask', True)
         flags = flags.replace("do_package_write_rpm", "")
         flags = flags.replace("do_deploy", "")
         flags = flags.replace("do_populate_sysroot", "")
diff --git a/yocto-poky/meta/classes/sanity.bbclass b/yocto-poky/meta/classes/sanity.bbclass
index ae86d26..77813e4 100644
--- a/yocto-poky/meta/classes/sanity.bbclass
+++ b/yocto-poky/meta/classes/sanity.bbclass
@@ -20,26 +20,83 @@
         if re.search(pattern, line)), (None, None))
 
 def sanity_conf_update(fn, lines, version_var_name, new_version):
-    index, line = sanity_conf_find_line(version_var_name, lines)
+    index, line = sanity_conf_find_line(r"^%s" % version_var_name, lines)
     lines[index] = '%s = "%d"\n' % (version_var_name, new_version)
     with open(fn, "w") as f:
         f.write(''.join(lines))
 
-# Functions added to this variable MUST throw an exception (or sys.exit()) unless they
-# successfully changed LCONF_VERSION in bblayers.conf
-BBLAYERS_CONF_UPDATE_FUNCS += "oecore_update_bblayers"
+# Functions added to this variable MUST throw a NotImplementedError exception unless 
+# they successfully changed the config version in the config file. Exceptions
+# are used since exec_func doesn't handle return values.
+BBLAYERS_CONF_UPDATE_FUNCS += " \
+    conf/bblayers.conf:LCONF_VERSION:LAYER_CONF_VERSION:oecore_update_bblayers \
+    conf/local.conf:CONF_VERSION:LOCALCONF_VERSION:oecore_update_localconf \
+    conf/site.conf:SCONF_VERSION:SITE_CONF_VERSION:oecore_update_siteconf \
+"
 
+SANITY_DIFF_TOOL ?= "meld"
+
+SANITY_LOCALCONF_SAMPLE ?= "${COREBASE}/meta*/conf/local.conf.sample"
+python oecore_update_localconf() {
+    # Check we are using a valid local.conf
+    current_conf  = d.getVar('CONF_VERSION', True)
+    conf_version =  d.getVar('LOCALCONF_VERSION', True)
+
+    failmsg = """Your version of local.conf was generated from an older/newer version of 
+local.conf.sample and there have been updates made to this file. Please compare the two 
+files and merge any changes before continuing.
+
+Matching the version numbers will remove this message.
+
+\"${SANITY_DIFF_TOOL} conf/local.conf ${SANITY_LOCALCONF_SAMPLE}\" 
+
+is a good way to visualise the changes."""
+    failmsg = d.expand(failmsg)
+
+    raise NotImplementedError(failmsg)
+}
+
+SANITY_SITECONF_SAMPLE ?= "${COREBASE}/meta*/conf/site.conf.sample"
+python oecore_update_siteconf() {
+    # If we have a site.conf, check it's valid
+    current_sconf = d.getVar('SCONF_VERSION', True)
+    sconf_version = d.getVar('SITE_CONF_VERSION', True)
+
+    failmsg = """Your version of site.conf was generated from an older version of 
+site.conf.sample and there have been updates made to this file. Please compare the two 
+files and merge any changes before continuing.
+
+Matching the version numbers will remove this message.
+
+\"${SANITY_DIFF_TOOL} conf/site.conf ${SANITY_SITECONF_SAMPLE}\" 
+
+is a good way to visualise the changes."""
+    failmsg = d.expand(failmsg)
+
+    raise NotImplementedError(failmsg)
+}
+
+SANITY_BBLAYERCONF_SAMPLE ?= "${COREBASE}/meta*/conf/bblayers.conf.sample"
 python oecore_update_bblayers() {
     # bblayers.conf is out of date, so see if we can resolve that
 
     current_lconf = int(d.getVar('LCONF_VERSION', True))
-    if not current_lconf:
-        sys.exit()
     lconf_version = int(d.getVar('LAYER_CONF_VERSION', True))
+
+    failmsg = """Your version of bblayers.conf has the wrong LCONF_VERSION (has ${LCONF_VERSION}, expecting ${LAYER_CONF_VERSION}).
+Please compare your file against bblayers.conf.sample and merge any changes before continuing.
+"${SANITY_DIFF_TOOL} conf/bblayers.conf ${SANITY_BBLAYERCONF_SAMPLE}" 
+
+is a good way to visualise the changes."""
+    failmsg = d.expand(failmsg)
+
+    if not current_lconf:
+        raise NotImplementedError(failmsg)
+
     lines = []
 
     if current_lconf < 4:
-        sys.exit()
+        raise NotImplementedError(failmsg)
 
     bblayers_fn = bblayers_conf_file(d)
     lines = sanity_conf_read(bblayers_fn)
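
Each entry in BBLAYERS_CONF_UPDATE_FUNCS above is a colon-separated tuple of config file, current-version variable, required-version variable and update function, and the function must raise NotImplementedError unless it actually updated the file. A sketch of how a distro layer might register its own updater (all "mydistro" names are hypothetical):

    BBLAYERS_CONF_UPDATE_FUNCS += "conf/mydistro.conf:MYDISTRO_CONF_VERSION:MYDISTRO_REQUIRED_CONF_VERSION:mydistro_update_conf"

    python mydistro_update_conf() {
        # No automatic migration is possible here, so tell the user what to do;
        # raising is required because exec_func cannot return a value.
        raise NotImplementedError(d.expand("Please update conf/mydistro.conf to version ${MYDISTRO_REQUIRED_CONF_VERSION} by hand."))
    }
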
@@ -58,25 +115,61 @@
                         lines[index] = (bbpath_line[:start + 1] +
                                     topdir_var + ':' + bbpath_line[start + 1:])
             else:
-                sys.exit()
+                raise NotImplementedError(failmsg)
         else:
             index, bbfiles_line = sanity_conf_find_line('BBFILES', lines)
             if bbfiles_line:
                 lines.insert(index, 'BBPATH = "' + topdir_var + '"\n')
             else:
-                sys.exit()
+                raise NotImplementedError(failmsg)
 
         current_lconf += 1
         sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+        bb.note("Your conf/bblayers.conf has been automatically updated.")
         return
 
     elif current_lconf == 5 and lconf_version > 5:
         # Null update, to avoid issues with people switching between poky and other distros
         current_lconf = 6
         sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+        bb.note("Your conf/bblayers.conf has been automatically updated.")
         return
 
-    sys.exit()
+        if not status.reparse:
+            status.addresult()
+
+    elif current_lconf == 6 and lconf_version > 6:
+        # Handle rename of meta-yocto -> meta-poky
+        # This marks the start of separate version numbers but code is needed in OE-Core
+        # for the migration, one last time.
+        layers = d.getVar('BBLAYERS', True).split()
+        layers = [ os.path.basename(path) for path in layers ]
+        if 'meta-yocto' in layers:
+            found = False
+            while True:
+                index, meta_yocto_line = sanity_conf_find_line(r'.*meta-yocto[\'"\s\n]', lines)
+                if meta_yocto_line:
+                    lines[index] = meta_yocto_line.replace('meta-yocto', 'meta-poky')
+                    found = True
+                else:
+                    break
+            if not found:
+                raise NotImplementedError(failmsg)
+            index, meta_yocto_line = sanity_conf_find_line('LCONF_VERSION.*\n', lines)
+            if meta_yocto_line:
+                lines[index] = 'POKY_BBLAYERS_CONF_VERSION = "1"\n'
+            else:
+                raise NotImplementedError(failmsg)
+            with open(bblayers_fn, "w") as f:
+                f.write(''.join(lines))
+            bb.note("Your conf/bblayers.conf has been automatically updated.")
+            return
+        current_lconf += 1
+        sanity_conf_update(bblayers_fn, lines, 'LCONF_VERSION', current_lconf)
+        bb.note("Your conf/bblayers.conf has been automatically updated.")
+        return
+
+    raise NotImplementedError(failmsg)
 }
 
 def raise_sanity_error(msg, d, network_error=False):
@@ -241,7 +334,7 @@
 
 def check_path_length(filepath, pathname, limit):
     if len(filepath) > limit:
-        return "The length of %s is longer than 410, this would cause unexpected errors, please use a shorter path.\n" % pathname
+        return "The length of %s is longer than %s, this would cause unexpected errors, please use a shorter path.\n" % (pathname, limit)
     return ""
 
 def get_filesystem_id(path):
@@ -329,6 +422,7 @@
     # Check that we don't have duplicate entries in PACKAGE_ARCHS & that TUNE_PKGARCH is in PACKAGE_ARCHS
     pkgarchs = sanity_data.getVar('PACKAGE_ARCHS', True)
     tunepkg = sanity_data.getVar('TUNE_PKGARCH', True)
+    defaulttune = sanity_data.getVar('DEFAULTTUNE', True)
     tunefound = False
     seen = {}
     dups = []
@@ -345,7 +439,7 @@
         messages = messages + "Error, the PACKAGE_ARCHS variable contains duplicates. The following archs are listed more than once: %s" % " ".join(dups)
 
     if tunefound == False:
-        messages = messages + "Error, the PACKAGE_ARCHS variable does not contain TUNE_PKGARCH (%s)." % tunepkg
+        messages = messages + "Error, the PACKAGE_ARCHS variable (%s) for DEFAULTTUNE (%s) does not contain TUNE_PKGARCH (%s)." % (pkgarchs, defaulttune, tunepkg)
 
     return messages
 
@@ -437,60 +531,46 @@
     return None
 
 # We use git parameters and functionality only found in 1.7.8 or later
+# The kernel tools assume git >= 1.8.3.1 (verified needed > 1.7.9.5) see #6162 
+# The git fetcher also had workarounds for git < 1.7.9.2 which we've dropped
 def check_git_version(sanity_data):
     from distutils.version import LooseVersion
     status, result = oe.utils.getstatusoutput("git --version 2> /dev/null")
     if status != 0:
         return "Unable to execute git --version, exit code %s\n" % status
     version = result.split()[2]
-    if LooseVersion(version) < LooseVersion("1.7.8"):
-        return "Your version of git is older than 1.7.8 and has bugs which will break builds. Please install a newer version of git.\n"
+    if LooseVersion(version) < LooseVersion("1.8.3.1"):
+        return "Your version of git is older than 1.8.3.1 and has bugs which will break builds. Please install a newer version of git.\n"
     return None
 
 # Check the required perl modules which may not be installed by default
 def check_perl_modules(sanity_data):
     ret = ""
     modules = ( "Text::ParseWords", "Thread::Queue", "Data::Dumper" )
+    errresult = ''
     for m in modules:
-        status, result = oe.utils.getstatusoutput("perl -e 'use %s' 2> /dev/null" % m)
+        status, result = oe.utils.getstatusoutput("perl -e 'use %s'" % m)
         if status != 0:
+            errresult += result
             ret += "%s " % m
     if ret:
-        return "Required perl module(s) not found: %s\n" % ret
+        return "Required perl module(s) not found: %s\n\n%s\n" % (ret, errresult)
     return None
 
 def sanity_check_conffiles(status, d):
-    # Check we are using a valid local.conf
-    current_conf  = d.getVar('CONF_VERSION', True)
-    conf_version =  d.getVar('LOCALCONF_VERSION', True)
-
-    if current_conf != conf_version:
-        status.addresult("Your version of local.conf was generated from an older/newer version of local.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/local.conf ${COREBASE}/meta*/conf/local.conf.sample\" is a good way to visualise the changes.\n")
-
-    # Check bblayers.conf is valid
-    current_lconf = d.getVar('LCONF_VERSION', True)
-    lconf_version = d.getVar('LAYER_CONF_VERSION', True)
-    if current_lconf != lconf_version:
-        funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
-        for func in funcs:
+    funcs = d.getVar('BBLAYERS_CONF_UPDATE_FUNCS', True).split()
+    for func in funcs:
+        conffile, current_version, required_version, func = func.split(":")
+        if check_conf_exists(conffile, d) and d.getVar(current_version, True) is not None and \
+                d.getVar(current_version, True) != d.getVar(required_version, True):
             success = True
             try:
-                bb.build.exec_func(func, d)
-            except Exception:
+                bb.build.exec_func(func, d, pythonexception=True)
+            except NotImplementedError as e:
                 success = False
+                status.addresult(str(e))
             if success:
-                bb.note("Your conf/bblayers.conf has been automatically updated.")
                 status.reparse = True
-        if not status.reparse:
-            status.addresult("Your version of bblayers.conf has the wrong LCONF_VERSION (has %s, expecting %s).\nPlease compare the your file against bblayers.conf.sample and merge any changes before continuing.\n\"meld conf/bblayers.conf ${COREBASE}/meta*/conf/bblayers.conf.sample\" is a good way to visualise the changes.\n" % (current_lconf, lconf_version))
-
-    # If we have a site.conf, check it's valid
-    if check_conf_exists("conf/site.conf", d):
-        current_sconf = d.getVar('SCONF_VERSION', True)
-        sconf_version = d.getVar('SITE_CONF_VERSION', True)
-        if current_sconf != sconf_version:
-            status.addresult("Your version of site.conf was generated from an older version of site.conf.sample and there have been updates made to this file. Please compare the two files and merge any changes before continuing.\nMatching the version numbers will remove this message.\n\"meld conf/site.conf ${COREBASE}/meta*/conf/site.conf.sample\" is a good way to visualise the changes.\n")
-
 
 def sanity_handle_abichanges(status, d):
     #
@@ -571,9 +651,9 @@
     return testmsg
        
 def check_sanity_version_change(status, d):
-    # Sanity checks to be done when SANITY_VERSION changes
+    # Sanity checks to be done when SANITY_VERSION or NATIVELSBSTRING changes
     # In other words, these tests run once in a given build directory and then 
-    # never again until the sanity version changes.
+# never again until the sanity version or host distribution id/version changes.
 
     # Check the python install is complete. glib-2.0-native requires
     # xml.parsers.expat
@@ -749,14 +829,6 @@
 
     check_supported_distro(d)
 
-    # Check if DISPLAY is set if TEST_IMAGE is set
-    if d.getVar('TEST_IMAGE', True) == '1' or d.getVar('DEFAULT_TEST_SUITES', True):
-        testtarget = d.getVar('TEST_TARGET', True)
-        if testtarget == 'qemu' or testtarget == 'QemuTarget':
-            display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True)
-            if not display:
-                status.addresult('testimage needs an X desktop to start qemu, please set DISPLAY correctly (e.g. DISPLAY=:1.0)\n')
-
     omask = os.umask(022)
     if omask & 0755:
         status.addresult("Please use a umask which allows a+rx and u+rwx\n")
@@ -788,7 +860,7 @@
     mirror_vars = ['MIRRORS', 'PREMIRRORS', 'SSTATE_MIRRORS']
     protocols = ['http', 'ftp', 'file', 'https', \
                  'git', 'gitsm', 'hg', 'osc', 'p4', 'svn', \
-                 'bzr', 'cvs']
+                 'bzr', 'cvs', 'npm', 'sftp', 'ssh']
     for mirror_var in mirror_vars:
         mirrors = (d.getVar(mirror_var, True) or '').replace('\\n', '\n').split('\n')
         for mirror_entry in mirrors:
@@ -848,18 +920,6 @@
         with open(checkfile, "w") as f:
             f.write(tmpdir)
 
-    # Check vmdk and live can't be built together.
-    if 'vmdk' in d.getVar('IMAGE_FSTYPES', True) and 'live' in d.getVar('IMAGE_FSTYPES', True):
-        status.addresult("Error, IMAGE_FSTYPES vmdk and live can't be built together\n")
-
-    # Check vdi and live can't be built together.
-    if 'vdi' in d.getVar('IMAGE_FSTYPES', True) and 'live' in d.getVar('IMAGE_FSTYPES', True):
-        status.addresult("Error, IMAGE_FSTYPES vdi and live can't be built together\n")
-
-    # Check qcow2 and live can't be built together.
-    if 'qcow2' in d.getVar('IMAGE_FSTYPES', True) and 'live' in d.getVar('IMAGE_FSTYPES', True):
-        status.addresult("Error, IMAGE_FSTYPES qcow2 and live can't be built together\n")
-
     # Check /bin/sh links to dash or bash
     real_sh = os.path.realpath('/bin/sh')
     if not real_sh.endswith('/dash') and not real_sh.endswith('/bash'):
@@ -887,6 +947,7 @@
     last_sanity_version = 0
     last_tmpdir = ""
     last_sstate_dir = ""
+    last_nativelsbstr = ""
     sanityverfile = sanity_data.expand("${TOPDIR}/conf/sanity_info")
     if os.path.exists(sanityverfile):
         with open(sanityverfile, 'r') as f:
@@ -897,12 +958,17 @@
                     last_tmpdir = line.split()[1]
                 if line.startswith('SSTATE_DIR'):
                     last_sstate_dir = line.split()[1]
+                if line.startswith('NATIVELSBSTRING'):
+                    last_nativelsbstr = line.split()[1]
 
     check_sanity_everybuild(status, sanity_data)
     
     sanity_version = int(sanity_data.getVar('SANITY_VERSION', True) or 1)
     network_error = False
-    if last_sanity_version < sanity_version: 
+    # NATIVELSBSTRING var may have been overridden with "universal", so
+    # get actual host distribution id and version
+    nativelsbstr = lsb_distro_identifier(sanity_data)
+    if last_sanity_version < sanity_version or last_nativelsbstr != nativelsbstr: 
         check_sanity_version_change(status, sanity_data)
         status.addresult(check_sanity_sstate_dir_change(sstate_dir, sanity_data))
     else: 
@@ -914,6 +980,7 @@
             f.write("SANITY_VERSION %s\n" % sanity_version) 
             f.write("TMPDIR %s\n" % tmpdir) 
             f.write("SSTATE_DIR %s\n" % sstate_dir) 
+            f.write("NATIVELSBSTRING %s\n" % nativelsbstr) 
 
     sanity_handle_abichanges(status, sanity_data)
 
diff --git a/yocto-poky/meta/classes/scons.bbclass b/yocto-poky/meta/classes/scons.bbclass
index b8de822..1579b05 100644
--- a/yocto-poky/meta/classes/scons.bbclass
+++ b/yocto-poky/meta/classes/scons.bbclass
@@ -2,6 +2,8 @@
 
 EXTRA_OESCONS ?= ""
 
+do_configure[noexec] = "1"
+
 scons_do_compile() {
         ${STAGING_BINDIR_NATIVE}/scons ${PARALLEL_MAKE} PREFIX=${prefix} prefix=${prefix} ${EXTRA_OESCONS} || \
         die "scons build execution failed."
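
For reference, a recipe using this class just inherits it and can pass extra arguments to scons through EXTRA_OESCONS; a sketch with a made-up build option:

    # Hypothetical recipe fragment for a SCons-based package
    inherit scons
    EXTRA_OESCONS = "WITH_FEATURE_X=yes"
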
diff --git a/yocto-poky/meta/classes/sign_ipk.bbclass b/yocto-poky/meta/classes/sign_ipk.bbclass
new file mode 100644
index 0000000..a481f6d
--- /dev/null
+++ b/yocto-poky/meta/classes/sign_ipk.bbclass
@@ -0,0 +1,52 @@
+# Class for generating signed IPK packages.
+#
+# Configuration variables used by this class:
+# IPK_GPG_PASSPHRASE_FILE
+#           Path to a file containing the passphrase of the signing key.
+# IPK_GPG_NAME
+#           Name of the key to sign with.
+# IPK_GPG_BACKEND
+#           Optional variable for specifying the backend to use for signing.
+#           Currently the only available option is 'local', i.e. local signing
+#           on the build host.
+# IPK_GPG_SIGNATURE_TYPE
+#           Optional variable for specifying the type of gpg signatures, can be:
+#                     1. Ascii armored (ASC), default if not set
+#                     2. Binary (BIN)
+# GPG_BIN
+#           Optional variable for specifying the gpg binary/wrapper to use for
+#           signing.
+# GPG_PATH
+#           Optional variable for specifying the gnupg "home" directory:
+#
+
+inherit sanity
+
+IPK_SIGN_PACKAGES = '1'
+IPK_GPG_BACKEND ?= 'local'
+IPK_GPG_SIGNATURE_TYPE ?= 'ASC'
+
+python () {
+    # Check configuration
+    for var in ('IPK_GPG_NAME', 'IPK_GPG_PASSPHRASE_FILE'):
+        if not d.getVar(var, True):
+            raise_sanity_error("You need to define %s in the config" % var, d)
+
+    sigtype = d.getVar("IPK_GPG_SIGNATURE_TYPE", True)
+    if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
+        raise_sanity_error("Bad value for IPK_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype, d)
+}
+
+def sign_ipk(d, ipk_to_sign):
+    from oe.gpg_sign import get_signer
+
+    bb.debug(1, 'Signing ipk: %s' % ipk_to_sign)
+
+    signer = get_signer(d, d.getVar('IPK_GPG_BACKEND', True))
+    sig_type = d.getVar('IPK_GPG_SIGNATURE_TYPE', True)
+    is_ascii_sig = (sig_type.upper() != "BIN")
+
+    signer.detach_sign(ipk_to_sign,
+                       d.getVar('IPK_GPG_NAME', True),
+                       d.getVar('IPK_GPG_PASSPHRASE_FILE', True),
+                       armor=is_ascii_sig)
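
Putting the variables described above together, enabling IPK signing from local.conf might look like the following sketch (the key name and passphrase file are placeholders):

    INHERIT += "sign_ipk"
    IPK_GPG_NAME = "testkey"
    IPK_GPG_PASSPHRASE_FILE = "/path/to/passphrase-file"
    # Optional; these are the defaults
    IPK_GPG_BACKEND = "local"
    IPK_GPG_SIGNATURE_TYPE = "ASC"
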
diff --git a/yocto-poky/meta/classes/sign_package_feed.bbclass b/yocto-poky/meta/classes/sign_package_feed.bbclass
index 4263810..31a6e9b 100644
--- a/yocto-poky/meta/classes/sign_package_feed.bbclass
+++ b/yocto-poky/meta/classes/sign_package_feed.bbclass
@@ -6,6 +6,16 @@
 #           Path to a file containing the passphrase of the signing key.
 # PACKAGE_FEED_GPG_NAME
 #           Name of the key to sign with. May be key id or key name.
+# PACKAGE_FEED_GPG_BACKEND
+#           Optional variable for specifying the backend to use for signing.
+#           Currently the only available option is 'local', i.e. local signing
+#           on the build host.
+# PACKAGE_FEED_GPG_SIGNATURE_TYPE
+#           Optional variable for specifying the type of gpg signature, can be:
+#               1. Ascii armored (ASC), default if not set
+#               2. Binary (BIN)
+#           This variable is only available for IPK feeds. It is ignored on
+#           other packaging backends.
 # GPG_BIN
 #           Optional variable for specifying the gpg binary/wrapper to use for
 #           signing.
@@ -15,6 +25,8 @@
 inherit sanity
 
 PACKAGE_FEED_SIGN = '1'
+PACKAGE_FEED_GPG_BACKEND ?= 'local'
+PACKAGE_FEED_GPG_SIGNATURE_TYPE ?= 'ASC'
 
 python () {
     # Check sanity of configuration
@@ -22,10 +34,10 @@
         if not d.getVar(var, True):
             raise_sanity_error("You need to define %s in the config" % var, d)
 
-    # Set expected location of the public key
-    d.setVar('PACKAGE_FEED_GPG_PUBKEY',
-             os.path.join(d.getVar('STAGING_ETCDIR_NATIVE'),
-                                   'PACKAGE-FEED-GPG-PUBKEY'))
+    sigtype = d.getVar("PACKAGE_FEED_GPG_SIGNATURE_TYPE", True)
+    if sigtype.upper() != "ASC" and sigtype.upper() != "BIN":
+        raise_sanity_error("Bad value for PACKAGE_FEED_GPG_SIGNATURE_TYPE (%s), use either ASC or BIN" % sigtype, d)
 }
 
-do_package_index[depends] += "signing-keys:do_export_public_keys"
+do_package_index[depends] += "signing-keys:do_deploy"
+do_rootfs[depends] += "signing-keys:do_populate_sysroot"
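
A corresponding local.conf sketch for signed package feed indexes, using the variables documented above (key name and passphrase file are placeholders):

    INHERIT += "sign_package_feed"
    PACKAGE_FEED_GPG_NAME = "testkey"
    PACKAGE_FEED_GPG_PASSPHRASE_FILE = "/path/to/passphrase-file"
    # Only honoured for IPK feeds
    PACKAGE_FEED_GPG_SIGNATURE_TYPE = "ASC"
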
diff --git a/yocto-poky/meta/classes/sign_rpm.bbclass b/yocto-poky/meta/classes/sign_rpm.bbclass
index f0c3dc9..a8ea75f 100644
--- a/yocto-poky/meta/classes/sign_rpm.bbclass
+++ b/yocto-poky/meta/classes/sign_rpm.bbclass
@@ -1,10 +1,14 @@
 # Class for generating signed RPM packages.
 #
 # Configuration variables used by this class:
-# RPM_GPG_PASSPHRASE_FILE
-#           Path to a file containing the passphrase of the signing key.
+# RPM_GPG_PASSPHRASE
+#           The passphrase of the signing key.
 # RPM_GPG_NAME
 #           Name of the key to sign with. May be key id or key name.
+# RPM_GPG_BACKEND
+#           Optional variable for specifying the backend to use for signing.
+#           Currently the only available option is 'local', i.e. local signing
+#           on the build host.
 # GPG_BIN
 #           Optional variable for specifying the gpg binary/wrapper to use for
 #           signing.
@@ -14,60 +18,36 @@
 inherit sanity
 
 RPM_SIGN_PACKAGES='1'
+RPM_GPG_BACKEND ?= 'local'
 
 
 python () {
+    if d.getVar('RPM_GPG_PASSPHRASE_FILE', True):
+        raise_sanity_error('RPM_GPG_PASSPHRASE_FILE is replaced by RPM_GPG_PASSPHRASE', d)
     # Check configuration
-    for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE_FILE'):
+    for var in ('RPM_GPG_NAME', 'RPM_GPG_PASSPHRASE'):
         if not d.getVar(var, True):
             raise_sanity_error("You need to define %s in the config" % var, d)
 
     # Set the expected location of the public key
-    d.setVar('RPM_GPG_PUBKEY', os.path.join(d.getVar('STAGING_ETCDIR_NATIVE'),
-                                            'RPM-GPG-PUBKEY'))
+    d.setVar('RPM_GPG_PUBKEY', os.path.join(d.getVar('STAGING_DIR_TARGET', False),
+                                            d.getVar('sysconfdir', False),
+                                            'pki',
+                                            'rpm-gpg',
+                                            'RPM-GPG-KEY-${DISTRO_VERSION}'))
 }
 
-
-def rpmsign_wrapper(d, files, passphrase, gpg_name=None):
-    import pexpect
-
-    # Find the correct rpm binary
-    rpm_bin_path = d.getVar('STAGING_BINDIR_NATIVE', True) + '/rpm'
-    cmd = rpm_bin_path + " --addsign --define '_gpg_name %s' " % gpg_name
-    if d.getVar('GPG_BIN', True):
-        cmd += "--define '%%__gpg %s' " % d.getVar('GPG_BIN', True)
-    if d.getVar('GPG_PATH', True):
-        cmd += "--define '_gpg_path %s' " % d.getVar('GPG_PATH', True)
-    cmd += ' '.join(files)
-
-    # Need to use pexpect for feeding the passphrase
-    proc = pexpect.spawn(cmd)
-    try:
-        proc.expect_exact('Enter pass phrase:', timeout=15)
-        proc.sendline(passphrase)
-        proc.expect(pexpect.EOF, timeout=900)
-        proc.close()
-    except pexpect.TIMEOUT as err:
-        bb.warn('rpmsign timeout: %s' % err)
-        proc.terminate()
-    else:
-        if os.WEXITSTATUS(proc.status) or not os.WIFEXITED(proc.status):
-            bb.warn('rpmsign failed: %s' % proc.before.strip())
-    return proc.exitstatus
-
-
 python sign_rpm () {
     import glob
+    from oe.gpg_sign import get_signer
 
-    with open(d.getVar("RPM_GPG_PASSPHRASE_FILE", True)) as fobj:
-        rpm_gpg_passphrase = fobj.readlines()[0].rstrip('\n')
-
-    rpm_gpg_name = (d.getVar("RPM_GPG_NAME", True) or "")
-
+    signer = get_signer(d, d.getVar('RPM_GPG_BACKEND', True))
     rpms = glob.glob(d.getVar('RPM_PKGWRITEDIR', True) + '/*')
 
-    if rpmsign_wrapper(d, rpms, rpm_gpg_passphrase, rpm_gpg_name) != 0:
-        raise bb.build.FuncFailed("RPM signing failed")
+    signer.sign_rpms(rpms,
+                     d.getVar('RPM_GPG_NAME', True),
+                     d.getVar('RPM_GPG_PASSPHRASE', True))
 }
 
-do_package_index[depends] += "signing-keys:do_export_public_keys"
+do_package_index[depends] += "signing-keys:do_deploy"
+do_rootfs[depends] += "signing-keys:do_populate_sysroot"
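
Reflecting the change above, an RPM signing setup in local.conf now passes the passphrase directly rather than via a passphrase file; a sketch with placeholder values:

    INHERIT += "sign_rpm"
    RPM_GPG_NAME = "testkey"
    RPM_GPG_PASSPHRASE = "test123"
    # Optional; 'local' is the default backend
    RPM_GPG_BACKEND = "local"
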
diff --git a/yocto-poky/meta/classes/siteinfo.bbclass b/yocto-poky/meta/classes/siteinfo.bbclass
index 3b562ee..50141a3 100644
--- a/yocto-poky/meta/classes/siteinfo.bbclass
+++ b/yocto-poky/meta/classes/siteinfo.bbclass
@@ -8,7 +8,7 @@
 #
 # 'what' can be one of
 # * target: Returns the target name ("<arch>-<os>")
-# * endianess: Return "be" for big endian targets, "le" for little endian
+# * endianness: Return "be" for big endian targets, "le" for little endian
 # * bits: Returns the bit size of the target, either "32" or "64"
 # * libc: Returns the name of the c library used by the target
 #
@@ -42,7 +42,7 @@
         "powerpc64": "endian-big bit-64 powerpc-common",
         "ppc": "endian-big bit-32 powerpc-common",
         "ppc64": "endian-big bit-64 powerpc-common",
-        "ppc64le": "endian-little bit-64 powerpc-common",
+        "ppc64le" : "endian-little bit-64 powerpc-common",
         "sh3": "endian-little bit-32 sh-common",
         "sh4": "endian-little bit-32 sh-common",
         "sparc": "endian-big bit-32",
@@ -71,6 +71,8 @@
     targetinfo = {
         "aarch64-linux-gnu": "aarch64-linux",
         "aarch64_be-linux-gnu": "aarch64_be-linux",
+        "aarch64-linux-musl": "aarch64-linux",
+        "aarch64_be-linux-musl": "aarch64_be-linux",
         "arm-linux-gnueabi": "arm-linux",
         "arm-linux-musleabi": "arm-linux",
         "arm-linux-uclibceabi": "arm-linux-uclibc",
@@ -79,8 +81,8 @@
         "armeb-linux-musleabi": "armeb-linux",
         "mips-linux-musl": "mips-linux",
         "mipsel-linux-musl": "mipsel-linux",
-        "mips64-linux-musl": "mips-linux",
-        "mips64el-linux-musl": "mipsel-linux",
+        "mips64-linux-musl": "mips64-linux",
+        "mips64el-linux-musl": "mips64el-linux",
         "mips64-linux-gnun32": "mips-linux bit-32",
         "mips64el-linux-gnun32": "mipsel-linux bit-32",
         "powerpc-linux": "powerpc32-linux",
@@ -92,6 +94,7 @@
         "powerpc64-linux-gnuspe": "powerpc-linux powerpc64-linux",
         "powerpc64-linux-muslspe": "powerpc-linux powerpc64-linux",
         "powerpc64-linux": "powerpc-linux",
+        "powerpc64-linux-musl": "powerpc-linux",
         "x86_64-cygwin": "bit-64",
         "x86_64-darwin": "bit-64",
         "x86_64-darwin9": "bit-64",
diff --git a/yocto-poky/meta/classes/spdx.bbclass b/yocto-poky/meta/classes/spdx.bbclass
index 454c53e..0c92765 100644
--- a/yocto-poky/meta/classes/spdx.bbclass
+++ b/yocto-poky/meta/classes/spdx.bbclass
@@ -232,7 +232,7 @@
     # Package info
     package_info = {}
     if full_spdx:
-        # All mandatory, only one occurance
+        # All mandatory, only one occurrence
         package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?</text>)', foss_output, re.S)[0]
         package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0]
         package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0]
diff --git a/yocto-poky/meta/classes/sstate.bbclass b/yocto-poky/meta/classes/sstate.bbclass
index d09e27a..8c62327 100644
--- a/yocto-poky/meta/classes/sstate.bbclass
+++ b/yocto-poky/meta/classes/sstate.bbclass
@@ -10,7 +10,7 @@
 
 SSTATE_PKGARCH    = "${PACKAGE_ARCH}"
 SSTATE_PKGSPEC    = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
-SSTATE_SWSPEC     = "sstate:${BPN}::${PV}:${PR}::${SSTATE_VERSION}:"
+SSTATE_SWSPEC     = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
 SSTATE_PKGNAME    = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
 SSTATE_PKG        = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
 SSTATE_EXTRAPATH   = ""
@@ -31,7 +31,7 @@
 SSTATE_SCAN_FILES ?= "*.la *-config *_config"
 SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
 
-BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
+BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
 
 SSTATE_ARCHS = " \
     ${BUILD_ARCH} \
@@ -51,8 +51,15 @@
 SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
 SSTATEPOSTINSTFUNCS = ""
 EXTRA_STAGING_FIXMES ?= ""
+SSTATECLEANFUNCS = ""
 
-SIGGEN_LOCKEDSIGS_CHECK_LEVEL ?= 'error'
+# Check whether sstate exists for tasks that support sstate and are in the
+# locked signatures file.
+SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
+
+# Check whether the task's computed hash matches the task's hash in the
+# locked signatures file.
+SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
 
 # The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
 # not sign)
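
For reference, the signing and locked-signature variables introduced here are all set from local.conf; a sketch with placeholder key details (and assuming "warn" is an accepted check level alongside "error"):

    # Sign generated sstate archives and verify signatures when installing them
    SSTATE_SIG_KEY = "testkey"
    SSTATE_SIG_PASSPHRASE = "test123"
    SSTATE_VERIFY_SIG = "1"

    # Downgrade locked-signature hash mismatches from errors to warnings
    SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"
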
@@ -79,6 +86,7 @@
 
     if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
         d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
+        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
         d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")
 
     # These classes encode staging paths into their scripts data so can only be
@@ -124,6 +132,7 @@
     if task == "populate_lic":
         d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
         d.setVar("SSTATE_EXTRAPATH", "")
+        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
 
     ss = sstate_init(task, d)
     for i in range(len(inputs)):
@@ -271,6 +280,7 @@
 def sstate_installpkg(ss, d):
     import oe.path
     import subprocess
+    from oe.gpg_sign import get_signer
 
     def prepdir(dir):
         # remove dir if it exists, ensure any parent directories do exist
@@ -296,7 +306,8 @@
     d.setVar('SSTATE_PKG', sstatepkg)
 
     if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
-        if subprocess.call(["gpg", "--verify", sstatepkg + ".sig", sstatepkg]) != 0:
+        signer = get_signer(d, 'local')
+        if not signer.verify(sstatepkg + '.sig'):
             bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)
 
     for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
@@ -440,6 +451,10 @@
                 stfile.endswith(rm_nohash):
             oe.path.remove(stfile)
 
+    # Removes the users/groups created by the package
+    for cleanfunc in (d.getVar('SSTATECLEANFUNCS', True) or '').split():
+        bb.build.exec_func(cleanfunc, d)
+
 sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
 
 CLEANFUNCS += "sstate_cleanall"
@@ -571,7 +586,8 @@
     d.setVar('SSTATE_BUILDDIR', sstatebuild)
     d.setVar('SSTATE_PKG', sstatepkg)
 
-    for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \
+    for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + \
+             ['sstate_create_package', 'sstate_sign_package'] + \
              (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
         # All hooks should run in SSTATE_BUILDDIR.
         bb.build.exec_func(f, d, (sstatebuild,))
@@ -672,26 +688,35 @@
 	else
 		tar -cz --file=$TFILE --files-from=/dev/null
 	fi
-	chmod 0664 $TFILE 
+	chmod 0664 $TFILE
 	mv -f $TFILE ${SSTATE_PKG}
 
-	if [ -n "${SSTATE_SIG_KEY}" ]; then
-		rm -f ${SSTATE_PKG}.sig
-		echo ${SSTATE_SIG_PASSPHRASE} | gpg --batch --passphrase-fd 0 --detach-sign --local-user ${SSTATE_SIG_KEY} --output ${SSTATE_PKG}.sig ${SSTATE_PKG}
-	fi
-
 	cd ${WORKDIR}
 	rm -rf ${SSTATE_BUILDDIR}
 }
 
+python sstate_sign_package () {
+    from oe.gpg_sign import get_signer
+
+    if d.getVar('SSTATE_SIG_KEY', True):
+        signer = get_signer(d, 'local')
+        sstate_pkg = d.getVar('SSTATE_PKG', True)
+        if os.path.exists(sstate_pkg + '.sig'):
+            os.unlink(sstate_pkg + '.sig')
+        signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
+                           d.getVar('SSTATE_SIG_PASSPHRASE', True), armor=False)
+}
+
 #
 # Shell function to decompress and prepare a package for installation
 # Will be run from within SSTATE_INSTDIR.
 #
 sstate_unpack_package () {
-	tar -xmvzf ${SSTATE_PKG}
+	tar -xvzf ${SSTATE_PKG}
 	# Use "! -w ||" to return true for read only files
 	[ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
+	[ ! -w ${SSTATE_PKG}.sig ] || [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig
+	[ ! -w ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo
 }
 
 BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
@@ -708,7 +733,10 @@
         # Magic data from BB_HASHFILENAME
         splithashfn = sq_hashfn[task].split(" ")
         spec = splithashfn[1]
-        extrapath = splithashfn[0]
+        if splithashfn[0] == "True":
+            extrapath = d.getVar("NATIVELSBSTRING", True) + "/"
+        else:
+            extrapath = ""
 
         tname = sq_task[task][3:]
 
@@ -830,7 +858,7 @@
         return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x
 
     def isPostInstDep(x):
-        if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native"]:
+        if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native", "ca-certificates-native"]:
             return True
         return False
 
@@ -838,6 +866,14 @@
     if taskdependees[task][1] == "do_populate_lic":
         return True
 
+    # We only need to trigger packagedata through direct dependencies
+    # but need to preserve packagedata on packagedata links
+    if taskdependees[task][1] == "do_packagedata":
+        for dep in taskdependees:
+            if taskdependees[dep][1] == "do_packagedata":
+                return False
+        return True
+
     for dep in taskdependees:
         bb.debug(2, "  considering dependency: %s" % (str(taskdependees[dep])))
         if task == dep:
@@ -856,6 +892,11 @@
         if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
             continue
 
+        # This is due to the [depends] in useradd.bbclass complicating matters
+        # The logic *is* reversed here due to the way hard setscene dependencies are injected
+        if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
+            continue
+
         # Consider sysroot depending on sysroot tasks
         if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
             # base-passwd/shadow-sysroot don't need their dependencies
@@ -879,11 +920,10 @@
         if taskdependees[task][1] == 'do_shared_workdir':
             continue
 
-        # This is due to the [depends] in useradd.bbclass complicating matters
-        # The logic *is* reversed here due to the way hard setscene dependencies are injected
-        if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
+        if taskdependees[dep][1] == "do_populate_lic":
             continue
 
+
         # Safe fallthrough default
         bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
         return False
@@ -933,8 +973,12 @@
                 if stamp not in stamps:
                     toremove.append(l)
                     if stamp not in seen:
-                        bb.note("Stamp %s is not reachable, removing related manifests" % stamp)
+                        bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                         seen.append(stamp)
+
+        if toremove:
+            bb.note("There are %d recipes to be removed from sysroot %s, removing..." % (len(toremove), a))
+
         for r in toremove:
             (stamp, manifest, workdir) = r.split()
             for m in glob.glob(manifest + ".*"):
diff --git a/yocto-poky/meta/classes/staging.bbclass b/yocto-poky/meta/classes/staging.bbclass
index 967eddd..bc5dfa8 100644
--- a/yocto-poky/meta/classes/staging.bbclass
+++ b/yocto-poky/meta/classes/staging.bbclass
@@ -127,7 +127,10 @@
                     elf_file = isELF(file)
                     if elf_file & 1:
                         if elf_file & 2:
-                            bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn))
+                            if 'already-stripped' in (d.getVar('INSANE_SKIP_' + pn, True) or "").split():
+                                bb.note("Skipping file %s from %s for already-stripped QA test" % (file[len(dvar):], pn))
+                            else:
+                                bb.warn("File '%s' from %s was already stripped, this will prevent future debugging!" % (file[len(dvar):], pn))
                             continue
 
                         if s.st_ino in inodes:
@@ -158,7 +161,7 @@
 addtask populate_sysroot after do_install
 
 SYSROOT_PREPROCESS_FUNCS ?= ""
-SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir/"
+SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
 SYSROOT_LOCK = "${STAGING_DIR}/staging.lock"
 
 # We clean out any existing sstate from the sysroot if we rerun configure
diff --git a/yocto-poky/meta/classes/syslinux.bbclass b/yocto-poky/meta/classes/syslinux.bbclass
index 44ef9a9..4fcb0c5 100644
--- a/yocto-poky/meta/classes/syslinux.bbclass
+++ b/yocto-poky/meta/classes/syslinux.bbclass
@@ -20,19 +20,20 @@
 do_bootimg[depends] += "${MLPREFIX}syslinux:do_populate_sysroot \
                         syslinux-native:do_populate_sysroot"
 
-SYSLINUXCFG  = "${S}/syslinux.cfg"
-
-ISOLINUXDIR = "/isolinux"
+ISOLINUXDIR ?= "/isolinux"
 SYSLINUXDIR = "/"
 # The kernel has an internal default console, which you can override with
 # a console=...some_tty...
 SYSLINUX_DEFAULT_CONSOLE ?= ""
 SYSLINUX_SERIAL ?= "0 115200"
 SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200"
-ISO_BOOTIMG = "isolinux/isolinux.bin"
-ISO_BOOTCAT = "isolinux/boot.cat"
-MKISOFS_OPTIONS = "-no-emul-boot -boot-load-size 4 -boot-info-table"
-APPEND_prepend = " ${SYSLINUX_ROOT} "
+SYSLINUX_PROMPT ?= "0"
+SYSLINUX_TIMEOUT ?= "50"
+AUTO_SYSLINUXMENU ?= "1"
+SYSLINUX_ROOT ?= "${ROOT}"
+SYSLINUX_CFG_VM  ?= "${S}/syslinux_vm.cfg"
+SYSLINUX_CFG_LIVE ?= "${S}/syslinux_live.cfg"
+APPEND ?= ""
 
 # Need UUID utility code.
 inherit fs-uuid
@@ -45,7 +46,7 @@
 	install -d ${DEST}${BOOTDIR}
 
 	# Install the config files
-	install -m 0644 ${SYSLINUXCFG} ${DEST}${BOOTDIR}/${CFGNAME}
+	install -m 0644 ${SYSLINUX_CFG} ${DEST}${BOOTDIR}/${CFGNAME}
 	if [ "${AUTO_SYSLINUXMENU}" = 1 ] ; then
 		install -m 0644 ${STAGING_DATADIR}/syslinux/vesamenu.c32 ${DEST}${BOOTDIR}/vesamenu.c32
 		install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${DEST}${BOOTDIR}/libcom32.c32
@@ -96,9 +97,9 @@
         bb.debug(1, "No labels, nothing to do")
         return
 
-    cfile = d.getVar('SYSLINUXCFG', True)
+    cfile = d.getVar('SYSLINUX_CFG', True)
     if not cfile:
-        raise bb.build.FuncFailed('Unable to read SYSLINUXCFG')
+        raise bb.build.FuncFailed('Unable to read SYSLINUX_CFG')
 
     try:
         cfgfile = file(cfile, 'w')
@@ -120,7 +121,7 @@
     if syslinux_serial:
         cfgfile.write('SERIAL %s\n' % syslinux_serial)
 
-    menu = d.getVar('AUTO_SYSLINUXMENU', True)
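+    # AUTO_SYSLINUXMENU is a string flag, so compare against "1" to get a real boolean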
+    menu = (d.getVar('AUTO_SYSLINUXMENU', True) == "1")
 
     if menu and syslinux_serial:
         cfgfile.write('DEFAULT Graphics console %s\n' % (labels.split()[0]))
@@ -163,6 +164,10 @@
             btypes = [ [ "Graphics console ", syslinux_default_console  ],
                 [ "Serial console ", syslinux_serial_tty ] ]
 
+        root = d.getVar('SYSLINUX_ROOT', True)
+        if not root:
+            raise bb.build.FuncFailed('SYSLINUX_ROOT not defined')
+
         for btype in btypes:
             cfgfile.write('LABEL %s%s\nKERNEL /vmlinuz\n' % (btype[0], label))
 
@@ -173,18 +178,15 @@
             append = localdata.getVar('APPEND', True)
             initrd = localdata.getVar('INITRD', True)
 
-            if append:
-                cfgfile.write('APPEND ')
+            append = root + " " + append
+            cfgfile.write('APPEND ')
 
-                if initrd:
-                    cfgfile.write('initrd=/initrd ')
+            if initrd:
+                cfgfile.write('initrd=/initrd ')
 
-                cfgfile.write('LABEL=%s '% (label))
-                append = replace_rootfs_uuid(d, append)
-                cfgfile.write('%s %s\n' % (append, btype[1]))
-            else:
-                cfgfile.write('APPEND %s\n' % btype[1])
+            cfgfile.write('LABEL=%s ' % (label))
+            append = replace_rootfs_uuid(d, append)
+            cfgfile.write('%s %s\n' % (append, btype[1]))
 
     cfgfile.close()
 }
-build_syslinux_cfg[vardeps] += "APPEND"
diff --git a/yocto-poky/meta/classes/systemd.bbclass b/yocto-poky/meta/classes/systemd.bbclass
index 46e72c7..db7873f 100644
--- a/yocto-poky/meta/classes/systemd.bbclass
+++ b/yocto-poky/meta/classes/systemd.bbclass
@@ -59,6 +59,8 @@
 
 
 python systemd_populate_packages() {
+    import re
+
     if not bb.utils.contains('DISTRO_FEATURES', 'systemd', True, False, d):
         return
 
@@ -144,10 +146,22 @@
         for pkg_systemd in systemd_packages.split():
             for service in get_package_var(d, 'SYSTEMD_SERVICE', pkg_systemd).split():
                 path_found = ''
+
+                # Deal with adding, for example, 'ifplugd@eth0.service' from
+                # 'ifplugd@.service'
+                base = None
+                if service.find('@') != -1:
+                    base = re.sub('@[^.]+\.', '@.', service)
+
                 for path in searchpaths:
                     if os.path.exists(oe.path.join(d.getVar("D", True), path, service)):
                         path_found = path
                         break
+                    elif base is not None:
+                        if os.path.exists(oe.path.join(d.getVar("D", True), path, base)):
+                            path_found = path
+                            break
+
                 if path_found != '':
                     systemd_add_files_and_parse(pkg_systemd, path_found, service, keys)
                 else:
diff --git a/yocto-poky/meta/classes/terminal.bbclass b/yocto-poky/meta/classes/terminal.bbclass
index e577c6d..9f4c24e 100644
--- a/yocto-poky/meta/classes/terminal.bbclass
+++ b/yocto-poky/meta/classes/terminal.bbclass
@@ -1,8 +1,7 @@
 OE_TERMINAL ?= 'auto'
 OE_TERMINAL[type] = 'choice'
 OE_TERMINAL[choices] = 'auto none \
-                        ${@" ".join(o.name \
-                                    for o in oe.terminal.prioritized())}'
+                        ${@oe_terminal_prioritized()}'
 
 OE_TERMINAL_EXPORTS += 'EXTRA_OEMAKE'
 OE_TERMINAL_EXPORTS[type] = 'list'
@@ -10,12 +9,15 @@
 XAUTHORITY ?= "${HOME}/.Xauthority"
 SHELL ?= "bash"
 
+def oe_terminal_prioritized():
+    import oe.terminal
+    return " ".join(o.name for o in oe.terminal.prioritized())
 
 def emit_terminal_func(command, envdata, d):
     cmd_func = 'do_terminal'
 
     envdata.setVar(cmd_func, 'exec ' + command)
-    envdata.setVarFlag(cmd_func, 'func', 1)
+    envdata.setVarFlag(cmd_func, 'func', '1')
 
     runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
     runfile = runfmt.format(func=cmd_func, task=cmd_func, taskfunc=cmd_func, pid=os.getpid())
@@ -39,14 +41,14 @@
 
     for v in os.environ:
         envdata.setVar(v, os.environ[v])
-        envdata.setVarFlag(v, 'export', 1)
+        envdata.setVarFlag(v, 'export', '1')
 
     for export in oe.data.typed_value('OE_TERMINAL_EXPORTS', d):
         value = d.getVar(export, True)
         if value is not None:
             os.environ[export] = str(value)
             envdata.setVar(export, str(value))
-            envdata.setVarFlag(export, 'export', 1)
+            envdata.setVarFlag(export, 'export', '1')
         if export == "PSEUDO_DISABLED":
             if "PSEUDO_UNLOAD" in os.environ:
                 del os.environ["PSEUDO_UNLOAD"]
@@ -62,7 +64,7 @@
         if value is not None:
             os.environ[key] = str(value)
             envdata.setVar(key, str(value))
-            envdata.setVarFlag(key, 'export', 1)
+            envdata.setVarFlag(key, 'export', '1')
 
     # A complex PS1 might need more escaping of chars.
     # Lets not export PS1 instead.
diff --git a/yocto-poky/meta/classes/testimage-auto.bbclass b/yocto-poky/meta/classes/testimage-auto.bbclass
index 860599d..e0a22b7 100644
--- a/yocto-poky/meta/classes/testimage-auto.bbclass
+++ b/yocto-poky/meta/classes/testimage-auto.bbclass
@@ -18,6 +18,6 @@
 python do_testimage_auto() {
     testimage_main(d)
 }
-addtask testimage_auto before do_build after do_rootfs
+addtask testimage_auto before do_build after do_image_complete
 do_testimage_auto[depends] += "${TESTIMAGEDEPENDS}"
 do_testimage_auto[lockfiles] += "${TESTIMAGELOCK}"
diff --git a/yocto-poky/meta/classes/testimage.bbclass b/yocto-poky/meta/classes/testimage.bbclass
index b4d4a69..e77bb11 100644
--- a/yocto-poky/meta/classes/testimage.bbclass
+++ b/yocto-poky/meta/classes/testimage.bbclass
@@ -33,17 +33,27 @@
 TEST_EXPORT_ONLY ?= "0"
 
 RPMTESTSUITE = "${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'smart rpm', '', d)}"
+MINTESTSUITE = "ping"
+NETTESTSUITE = "${MINTESTSUITE} ssh df date scp syslog"
+DEVTESTSUITE = "gcc kernelmodule ldd"
 
-DEFAULT_TEST_SUITES = "ping auto"
-DEFAULT_TEST_SUITES_pn-core-image-minimal = "ping"
-DEFAULT_TEST_SUITES_pn-core-image-sato = "ping ssh df connman syslog xorg scp vnc date dmesg parselogs ${RPMTESTSUITE} \
+DEFAULT_TEST_SUITES = "${MINTESTSUITE} auto"
+DEFAULT_TEST_SUITES_pn-core-image-minimal = "${MINTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-minimal-dev = "${MINTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-full-cmdline = "${NETTESTSUITE} perl python logrotate"
+DEFAULT_TEST_SUITES_pn-core-image-x11 = "${MINTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb = "${NETTESTSUITE} pam parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-sato = "${NETTESTSUITE} connman xorg parselogs ${RPMTESTSUITE} \
     ${@bb.utils.contains('IMAGE_PKGTYPE', 'rpm', 'python', '', d)}"
-DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "ping ssh df connman syslog xorg scp vnc date perl ldd gcc kernelmodule dmesg python parselogs ${RPMTESTSUITE}"
-DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "ping buildcvs buildiptables buildsudoku connman date df gcc kernelmodule ldd pam parselogs perl python scp ${RPMTESTSUITE} ssh syslog logrotate"
+DEFAULT_TEST_SUITES_pn-core-image-sato-sdk = "${NETTESTSUITE} connman xorg perl python \
+    ${DEVTESTSUITE} parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-dev = "${NETTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
+DEFAULT_TEST_SUITES_pn-core-image-lsb-sdk = "${NETTESTSUITE} buildcvs buildiptables buildsudoku \
+    connman ${DEVTESTSUITE} pam perl python parselogs ${RPMTESTSUITE}"
 DEFAULT_TEST_SUITES_pn-meta-toolchain = "auto"
 
 # aarch64 has no graphics
-DEFAULT_TEST_SUITES_remove_aarch64 = "xorg vnc"
+DEFAULT_TEST_SUITES_remove_aarch64 = "xorg"
 
 #qemumips is too slow for buildsudoku
 DEFAULT_TEST_SUITES_remove_qemumips = "buildsudoku"
@@ -98,82 +108,6 @@
 do_testimage[depends] += "${TESTIMAGEDEPENDS}"
 do_testimage[lockfiles] += "${TESTIMAGELOCK}"
 
-python do_testsdk() {
-    testsdk_main(d)
-}
-addtask testsdk
-do_testsdk[nostamp] = "1"
-do_testsdk[depends] += "${TESTIMAGEDEPENDS}"
-do_testsdk[lockfiles] += "${TESTIMAGELOCK}"
-
-# get testcase list from specified file
-# if path is a relative path, then relative to build/conf/
-def read_testlist(d, fpath):
-    if not os.path.isabs(fpath):
-        builddir = d.getVar("TOPDIR", True)
-        fpath = os.path.join(builddir, "conf", fpath)
-    if not os.path.exists(fpath):
-        bb.fatal("No such manifest file: ", fpath)
-    tcs = []
-    for line in open(fpath).readlines():
-        line = line.strip()
-        if line and not line.startswith("#"):
-            tcs.append(line)
-    return " ".join(tcs)
-
-def get_tests_list(d, type="runtime"):
-    testsuites = []
-    testslist = []
-    manifests = d.getVar("TEST_SUITES_MANIFEST", True)
-    if manifests is not None:
-        manifests = manifests.split()
-        for manifest in manifests:
-            testsuites.extend(read_testlist(d, manifest).split())
-    else:
-        testsuites = d.getVar("TEST_SUITES", True).split()
-    if type == "sdk":
-        testsuites = (d.getVar("TEST_SUITES_SDK", True) or "auto").split()
-    bbpath = d.getVar("BBPATH", True).split(':')
-
-    # This relies on lib/ under each directory in BBPATH being added to sys.path
-    # (as done by default in base.bbclass)
-    for testname in testsuites:
-        if testname != "auto":
-            if testname.startswith("oeqa."):
-                testslist.append(testname)
-                continue
-            found = False
-            for p in bbpath:
-                if os.path.exists(os.path.join(p, 'lib', 'oeqa', type, testname + '.py')):
-                    testslist.append("oeqa." + type + "." + testname)
-                    found = True
-                    break
-                elif os.path.exists(os.path.join(p, 'lib', 'oeqa', type, testname.split(".")[0] + '.py')):
-                    testslist.append("oeqa." + type + "." + testname)
-                    found = True
-                    break
-            if not found:
-                bb.fatal('Test %s specified in TEST_SUITES could not be found in lib/oeqa/runtime under BBPATH' % testname)
-
-    if "auto" in testsuites:
-        def add_auto_list(path):
-            if not os.path.exists(os.path.join(path, '__init__.py')):
-                bb.fatal('Tests directory %s exists but is missing __init__.py' % path)
-            files = sorted([f for f in os.listdir(path) if f.endswith('.py') and not f.startswith('_')])
-            for f in files:
-                module = 'oeqa.' + type + '.' + f[:-3]
-                if module not in testslist:
-                    testslist.append(module)
-
-        for p in bbpath:
-            testpath = os.path.join(p, 'lib', 'oeqa', type)
-            bb.debug(2, 'Searching for tests in %s' % testpath)
-            if os.path.exists(testpath):
-                add_auto_list(testpath)
-
-    return testslist
-
-
 def exportTests(d,tc):
     import json
     import shutil
@@ -188,13 +122,13 @@
     savedata["host_dumper"] = {}
     for key in tc.__dict__:
         # special cases
-        if key != "d" and key != "target" and key != "host_dumper":
+        if key not in ['d', 'target', 'host_dumper', 'suite']:
             savedata[key] = getattr(tc, key)
     savedata["target"]["ip"] = tc.target.ip or d.getVar("TEST_TARGET_IP", True)
     savedata["target"]["server_ip"] = tc.target.server_ip or d.getVar("TEST_SERVER_IP", True)
 
     keys = [ key for key in d.keys() if not key.startswith("_") and not key.startswith("BB") \
-            and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func")]
+            and not key.startswith("B_pn") and not key.startswith("do_") and not d.getVarFlag(key, "func", True)]
     for key in keys:
         try:
             savedata["d"][key] = d.getVar(key, True)
@@ -229,11 +163,23 @@
     bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/runtime/files"))
     bb.utils.mkdirhier(os.path.join(exportpath, "oeqa/utils"))
     # copy test modules, this should cover tests in other layers too
+    bbpath = d.getVar("BBPATH", True).split(':')
     for t in tc.testslist:
+        isfolder = False
         if re.search("\w+\.\w+\.test_\S+", t):
             t = '.'.join(t.split('.')[:3])
         mod = pkgutil.get_loader(t)
-        shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
+        # Is the test module nested more deeply than the usual oeqa.<type>.<module>?
+        if (t.count('.') > 2):
+            for p in bbpath:
+                foldername = os.path.join(p, 'lib', os.sep.join(t.split('.')).rsplit(os.sep, 1)[0])
+                if os.path.isdir(foldername):
+                    isfolder = True
+                    target_folder = os.path.join(exportpath, "oeqa", "runtime", os.path.basename(foldername))
+                    if not os.path.exists(target_folder):
+                        shutil.copytree(foldername, target_folder)
+        if not isfolder:
+            shutil.copy2(mod.filename, os.path.join(exportpath, "oeqa/runtime"))
     # copy __init__.py files
     oeqadir = pkgutil.get_loader("oeqa").filename
     shutil.copy2(os.path.join(oeqadir, "__init__.py"), os.path.join(exportpath, "oeqa"))
@@ -253,14 +199,13 @@
 
     bb.plain("Exported tests to: %s" % exportpath)
 
-
 def testimage_main(d):
     import unittest
     import os
     import oeqa.runtime
     import time
     import signal
-    from oeqa.oetest import loadTests, runTests
+    from oeqa.oetest import ImageTestContext
     from oeqa.targetcontrol import get_target_controller
     from oeqa.utils.dump import get_host_dumper
 
@@ -271,65 +216,24 @@
         bb.utils.remove(d.getVar("TEST_EXPORT_DIR", True), recurse=True)
         bb.utils.mkdirhier(d.getVar("TEST_EXPORT_DIR", True))
 
-    # tests in TEST_SUITES become required tests
-    # they won't be skipped even if they aren't suitable for a image (like xorg for minimal)
-    # testslist is what we'll actually pass to the unittest loader
-    testslist = get_tests_list(d)
-    testsrequired = [t for t in d.getVar("TEST_SUITES", True).split() if t != "auto"]
-
-    tagexp = d.getVar("TEST_SUITES_TAGS", True)
-
     # we need the host dumper in test context
     host_dumper = get_host_dumper(d)
 
     # the robot dance
     target = get_target_controller(d)
 
-    class TestContext(object):
-        def __init__(self):
-            self.d = d
-            self.testslist = testslist
-            self.tagexp = tagexp
-            self.testsrequired = testsrequired
-            self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
-            self.target = target
-            self.host_dumper = host_dumper
-            self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
-            self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
-            manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True), d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
-            nomanifest = d.getVar("IMAGE_NO_MANIFEST", True)
-
-            self.sigterm = False
-            self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
-            signal.signal(signal.SIGTERM, self.sigterm_exception)
-
-            if nomanifest is None or nomanifest != "1":
-                try:
-                    with open(manifest) as f:
-                        self.pkgmanifest = f.read()
-                except IOError as e:
-                    bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
-            else:
-                self.pkgmanifest = ""
-
-        def sigterm_exception(self, signum, stackframe):
-            bb.warn("TestImage received SIGTERM, shutting down...")
-            self.sigterm = True
-            self.target.stop()
-
     # test context
-    tc = TestContext()
+    tc = ImageTestContext(d, target, host_dumper)
 
     # this is a dummy load of tests
     # we are doing that to find compile errors in the tests themselves
     # before booting the image
     try:
-        loadTests(tc)
+        tc.loadTests()
     except Exception as e:
         import traceback
         bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
 
-
     if export:
         signal.signal(signal.SIGTERM, tc.origsigtermhandler)
         tc.origsigtermhandler = None
@@ -339,7 +243,7 @@
         try:
             target.start()
             starttime = time.time()
-            result = runTests(tc)
+            result = tc.runTests()
             stoptime = time.time()
             if result.wasSuccessful():
                 bb.plain("%s - Ran %d test%s in %.3fs" % (pn, result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
@@ -356,93 +260,4 @@
 
 testimage_main[vardepsexclude] =+ "BB_ORIGENV"
 
-
-def testsdk_main(d):
-    import unittest
-    import os
-    import glob
-    import oeqa.runtime
-    import oeqa.sdk
-    import time
-    import subprocess
-    from oeqa.oetest import loadTests, runTests
-
-    pn = d.getVar("PN", True)
-    bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
-
-    # tests in TEST_SUITES become required tests
-    # they won't be skipped even if they aren't suitable.
-    # testslist is what we'll actually pass to the unittest loader
-    testslist = get_tests_list(d, "sdk")
-    testsrequired = [t for t in (d.getVar("TEST_SUITES_SDK", True) or "auto").split() if t != "auto"]
-
-    tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
-    if not os.path.exists(tcname):
-        bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' .")
-
-    class TestContext(object):
-        def __init__(self):
-            self.d = d
-            self.testslist = testslist
-            self.testsrequired = testsrequired
-            self.filesdir = os.path.join(os.path.dirname(os.path.abspath(oeqa.runtime.__file__)),"files")
-            self.sdktestdir = sdktestdir
-            self.sdkenv = sdkenv
-            self.imagefeatures = d.getVar("IMAGE_FEATURES", True).split()
-            self.distrofeatures = d.getVar("DISTRO_FEATURES", True).split()
-            manifest = d.getVar("SDK_TARGET_MANIFEST", True)
-            try:
-                with open(manifest) as f:
-                    self.pkgmanifest = f.read()
-            except IOError as e:
-                bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)
-            hostmanifest = d.getVar("SDK_HOST_MANIFEST", True)
-            try:
-                with open(hostmanifest) as f:
-                    self.hostpkgmanifest = f.read()
-            except IOError as e:
-                bb.fatal("No host package manifest file found. Did you build the sdk image?\n%s" % e)
-
-    sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
-    bb.utils.remove(sdktestdir, True)
-    bb.utils.mkdirhier(sdktestdir)
-    try:
-        subprocess.check_output("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
-    except subprocess.CalledProcessError as e:
-        bb.fatal("Couldn't install the SDK:\n%s" % e.output)
-
-    try:
-        targets = glob.glob(d.expand(sdktestdir + "/tc/environment-setup-*"))
-        bb.warn(str(targets))
-        for sdkenv in targets:
-            bb.plain("Testing %s" % sdkenv)
-            # test context
-            tc = TestContext()
-
-            # this is a dummy load of tests
-            # we are doing that to find compile errors in the tests themselves
-            # before booting the image
-            try:
-                loadTests(tc, "sdk")
-            except Exception as e:
-                import traceback
-                bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
-
-    
-            starttime = time.time()
-            result = runTests(tc, "sdk")
-            stoptime = time.time()
-            if result.wasSuccessful():
-                bb.plain("%s SDK(%s):%s - Ran %d test%s in %.3fs" % (pn, os.path.basename(tcname), os.path.basename(sdkenv),result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
-                msg = "%s - OK - All required tests passed" % pn
-                skipped = len(result.skipped)
-                if skipped:
-                    msg += " (skipped=%d)" % skipped
-                bb.plain(msg)
-            else:
-                raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn )
-    finally:
-        bb.utils.remove(sdktestdir, True)
-
-testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
-
+inherit testsdk
diff --git a/yocto-poky/meta/classes/testsdk.bbclass b/yocto-poky/meta/classes/testsdk.bbclass
new file mode 100644
index 0000000..f4dc2c3
--- /dev/null
+++ b/yocto-poky/meta/classes/testsdk.bbclass
@@ -0,0 +1,142 @@
+# Copyright (C) 2013 - 2016 Intel Corporation
+#
+# Released under the MIT license (see COPYING.MIT)
+
+# testsdk.bbclass enables testing of the SDK and the extensible SDK (eSDK)
+#
+# To run the SDK tests you need to do:
+# - bitbake core-image-sato -c populate_sdk
+# - bitbake core-image-sato -c testsdk
+#
+# To run the eSDK tests you need to do:
+# - bitbake core-image-sato -c populate_sdk_ext
+# - bitbake core-image-sato -c testsdkext
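+#
+# Note: testimage.bbclass inherits this class, so images that already use
+# testimage get the do_testsdk and do_testsdkext tasks as well.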
+
+TEST_LOG_DIR ?= "${WORKDIR}/testimage"
+TESTSDKLOCK = "${TMPDIR}/testsdk.lock"
+
+def run_test_context(CTestContext, d, testdir, tcname, pn, *args):
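+    # Any extra positional arguments are handed through to the test context
+    # constructor; e.g. the eSDK compatibility run below passes an extra flag here.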
+    import glob
+    import time
+
+    targets = glob.glob(d.expand(testdir + "/tc/environment-setup-*"))
+    for sdkenv in targets:
+        bb.plain("Testing %s" % sdkenv)
+        tc = CTestContext(d, testdir, sdkenv, tcname, args)
+
+        # this is a dummy load of tests
+        # we are doing that to find compile errors in the tests themselves
+        # before booting the image
+        try:
+            tc.loadTests()
+        except Exception as e:
+            import traceback
+            bb.fatal("Loading tests failed:\n%s" % traceback.format_exc())
+
+        starttime = time.time()
+        result = tc.runTests()
+        stoptime = time.time()
+        if result.wasSuccessful():
+            bb.plain("%s SDK(%s):%s - Ran %d test%s in %.3fs" % (pn, os.path.basename(tcname), os.path.basename(sdkenv),result.testsRun, result.testsRun != 1 and "s" or "", stoptime - starttime))
+            msg = "%s - OK - All required tests passed" % pn
+            skipped = len(result.skipped)
+            if skipped:
+                msg += " (skipped=%d)" % skipped
+            bb.plain(msg)
+        else:
+            raise bb.build.FuncFailed("%s - FAILED - check the task log and the commands log" % pn)
+
+def testsdk_main(d):
+    import os
+    import oeqa.sdk
+    import subprocess
+    from oeqa.oetest import SDKTestContext
+
+    pn = d.getVar("PN", True)
+    bb.utils.mkdirhier(d.getVar("TEST_LOG_DIR", True))
+
+    tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh")
+    if not os.path.exists(tcname):
+        bb.fatal("The toolchain is not built. Build it before running the tests: 'bitbake <image> -c populate_sdk' .")
+
+    sdktestdir = d.expand("${WORKDIR}/testimage-sdk/")
+    bb.utils.remove(sdktestdir, True)
+    bb.utils.mkdirhier(sdktestdir)
+    try:
+        subprocess.check_output("cd %s; %s <<EOF\n./tc\nY\nEOF" % (sdktestdir, tcname), shell=True)
+    except subprocess.CalledProcessError as e:
+        bb.fatal("Couldn't install the SDK:\n%s" % e.output)
+
+    try:
+        run_test_context(SDKTestContext, d, sdktestdir, tcname, pn)
+    finally:
+        bb.utils.remove(sdktestdir, True)
+
+testsdk_main[vardepsexclude] =+ "BB_ORIGENV"
+
+python do_testsdk() {
+    testsdk_main(d)
+}
+addtask testsdk
+do_testsdk[nostamp] = "1"
+do_testsdk[lockfiles] += "${TESTSDKLOCK}"
+
+TEST_LOG_SDKEXT_DIR ?= "${WORKDIR}/testsdkext"
+TESTSDKEXTLOCK = "${TMPDIR}/testsdkext.lock"
+
+def testsdkext_main(d):
+    import os
+    import oeqa.sdkext
+    import subprocess
+    from bb.utils import export_proxies
+    from oeqa.oetest import SDKTestContext, SDKExtTestContext
+    from oeqa.utils import avoid_paths_in_environ
+
+
+    # the extensible SDK uses the network
+    export_proxies(d)
+
+    # The extensible SDK can be contaminated if native programs are in PATH,
+    # i.e. perl-native could be picked up instead of the eSDK's own perl.
+    paths_to_avoid = [d.getVar('STAGING_DIR', True),
+                      d.getVar('BASE_WORKDIR', True)]
+    os.environ['PATH'] = avoid_paths_in_environ(paths_to_avoid)
+
+    pn = d.getVar("PN", True)
+    bb.utils.mkdirhier(d.getVar("TEST_LOG_SDKEXT_DIR", True))
+
+    tcname = d.expand("${SDK_DEPLOY}/${TOOLCHAINEXT_OUTPUTNAME}.sh")
+    if not os.path.exists(tcname):
+        bb.fatal("The toolchain ext is not built. Build it before running the" \
+                 " tests: 'bitbake <image> -c populate_sdk_ext' .")
+
+    testdir = d.expand("${WORKDIR}/testsdkext/")
+    bb.utils.remove(testdir, True)
+    bb.utils.mkdirhier(testdir)
+    try:
+        subprocess.check_output("%s -y -d %s/tc" % (tcname, testdir), shell=True)
+    except subprocess.CalledProcessError as e:
+        bb.fatal("Couldn't install the SDK EXT:\n%s" % e.output)
+
+    try:
+        bb.plain("Running SDK Compatibility tests ...")
+        run_test_context(SDKExtTestContext, d, testdir, tcname, pn, True)
+    finally:
+        pass
+
+    try:
+        bb.plain("Running Extensible SDK tests ...")
+        run_test_context(SDKExtTestContext, d, testdir, tcname, pn)
+    finally:
+        pass
+
+    bb.utils.remove(testdir, True)
+
+testsdkext_main[vardepsexclude] =+ "BB_ORIGENV"
+
+python do_testsdkext() {
+    testsdkext_main(d)
+}
+addtask testsdkext
+do_testsdkext[nostamp] = "1"
+do_testsdkext[lockfiles] += "${TESTSDKEXTLOCK}"
diff --git a/yocto-poky/meta/classes/tinderclient.bbclass b/yocto-poky/meta/classes/tinderclient.bbclass
index 6984efd..2bc75fc 100644
--- a/yocto-poky/meta/classes/tinderclient.bbclass
+++ b/yocto-poky/meta/classes/tinderclient.bbclass
@@ -142,7 +142,7 @@
 
     selector = url + "/xml/build_status.pl"
 
-    # now post it - in chunks of 10.000 charachters
+    # now post it - in chunks of 10.000 characters
     new_log = _log
     while len(new_log) > 0:
         content_type, body = tinder_format_http_post(d,status,new_log[0:18000])
diff --git a/yocto-poky/meta/classes/toaster.bbclass b/yocto-poky/meta/classes/toaster.bbclass
index d63cff5..1a70f14 100644
--- a/yocto-poky/meta/classes/toaster.bbclass
+++ b/yocto-poky/meta/classes/toaster.bbclass
@@ -112,27 +112,25 @@
                 pass    # ignore lines without valid key: value pairs
     return pkgdata
 
-
 python toaster_package_dumpdata() {
     """
-    Dumps the data created by emit_pkgdata
+    Dumps the data about the packages created by a recipe
     """
-    # replicate variables from the package.bbclass
 
-    packages = d.getVar('PACKAGES', True)
-    pkgdest = d.getVar('PKGDEST', True)
+    # No need to try to dump data if the recipe isn't generating packages
+    if not d.getVar('PACKAGES', True):
+        return
 
     pkgdatadir = d.getVar('PKGDESTWORK', True)
-
-    # scan and send data for each package
-
     lpkgdata = {}
-    for pkg in packages.split():
+    datadir = os.path.join(pkgdatadir, 'runtime')
 
-        lpkgdata = _toaster_load_pkgdatafile(pkgdatadir + "/runtime/", pkg)
-
-        # Fire an event containing the pkg data
-        bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
+    # scan and send data for each generated package
+    for datafile in os.listdir(datadir):
+        if not datafile.endswith('.packaged'):
+            lpkgdata = _toaster_load_pkgdatafile(datadir, datafile)
+            # Fire an event containing the pkg data
+            bb.event.fire(bb.event.MetadataEvent("SinglePackageInfo", lpkgdata), d)
 }
 
 # 2. Dump output image files information
@@ -143,35 +141,55 @@
     image_types.bbclass will spell out IMAGE_CMD_xxx variables that actually
     have hardcoded ways to create image file names in them.
     So we look for files starting with the set name.
+
+    We also look for other files in the images/ directory which don't
+    match IMAGE_NAME, such as the kernel bzImage, modules tarball etc.
     """
 
-    deploy_dir_image = d.getVar('DEPLOY_DIR_IMAGE', True);
+    dir_to_walk = d.getVar('DEPLOY_DIR_IMAGE', True)
     image_name = d.getVar('IMAGE_NAME', True);
-
     image_info_data = {}
     artifact_info_data = {}
 
-    # collect all artifacts
-    for dirpath, dirnames, filenames in os.walk(deploy_dir_image):
-        for fn in filenames:
+    # collect all images and artifacts in the images directory
+    for dirpath, dirnames, filenames in os.walk(dir_to_walk):
+        for filename in filenames:
+            full_path = os.path.join(dirpath, filename)
             try:
-                if fn.startswith(image_name):
-                    image_output = os.path.join(dirpath, fn)
-                    image_info_data[image_output] = os.stat(image_output).st_size
+                if filename.startswith(image_name):
+                    # image
+                    image_info_data[full_path] = os.stat(full_path).st_size
                 else:
-                    import stat
-                    artifact_path = os.path.join(dirpath, fn)
-                    filestat = os.stat(artifact_path)
-                    if not os.path.islink(artifact_path):
-                        artifact_info_data[artifact_path] = filestat.st_size
+                    # other non-image artifact
+                    if not os.path.islink(full_path):
+                        artifact_info_data[full_path] = os.stat(full_path).st_size
             except OSError as e:
                 bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
 
-    bb.event.fire(bb.event.MetadataEvent("ImageFileSize",image_info_data), d)
-    bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize",artifact_info_data), d)
+    bb.event.fire(bb.event.MetadataEvent("ImageFileSize", image_info_data), d)
+    bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize", artifact_info_data), d)
 }
 
+python toaster_artifact_dumpdata() {
+    """
+    Dump data about artifacts in the SDK_DEPLOY directory
+    """
 
+    dir_to_walk = d.getVar("SDK_DEPLOY", True)
+    artifact_info_data = {}
+
+    # collect all artifacts in the sdk directory
+    for dirpath, dirnames, filenames in os.walk(dir_to_walk):
+        for filename in filenames:
+            full_path = os.path.join(dirpath, filename)
+            try:
+                if not os.path.islink(full_path):
+                    artifact_info_data[full_path] = os.stat(full_path).st_size
+            except OSError as e:
+                bb.event.fire(bb.event.MetadataEvent("OSErrorException", e), d)
+
+    bb.event.fire(bb.event.MetadataEvent("ArtifactFileSize", artifact_info_data), d)
+}
 
 # collect list of buildstats files based on fired events; when the build completes, collect all stats and fire an event with collected data
 
@@ -182,29 +200,37 @@
     import bb.utils
     import os
 
     if not e.data.getVar('BUILDSTATS_BASE', True):
         return  # if we don't have buildstats, we cannot collect stats
 
+    toaster_statlist_file = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")
+
+    def stat_to_float(value):
+        return float(value.strip('% \n\r'))
+
     def _append_read_list(v):
         lock = bb.utils.lockfile(e.data.expand("${TOPDIR}/toaster.lock"), False, True)
 
-        with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "a") as fout:
-            bn = get_bn(e)
-            bsdir = os.path.join(e.data.getVar('BUILDSTATS_BASE', True), bn)
-            taskdir = os.path.join(bsdir, e.data.expand("${PF}"))
+        with open(toaster_statlist_file, "a") as fout:
+            taskdir = e.data.expand("${BUILDSTATS_BASE}/${BUILDNAME}/${PF}")
             fout.write("%s::%s::%s::%s\n" % (e.taskfile, e.taskname, os.path.join(taskdir, e.task), e.data.expand("${PN}")))
 
         bb.utils.unlockfile(lock)
 
     def _read_stats(filename):
-        cpu_usage = 0
-        disk_io = 0
-        startio = '0'
-        endio = '0'
-        started = '0'
-        ended = '0'
-        pn = ''
+        # seconds
+        cpu_time_user = 0
+        cpu_time_system = 0
+
+        # bytes
+        disk_io_read = 0
+        disk_io_write = 0
+
+        started = 0
+        ended = 0
+
         taskname = ''
+
         statinfo = {}
 
         with open(filename, 'r') as task_bs:
@@ -212,43 +238,49 @@
                 k,v = line.strip().split(": ", 1)
                 statinfo[k] = v
 
-        if "CPU usage" in statinfo:
-            cpu_usage = str(statinfo["CPU usage"]).strip('% \n\r')
-
-        if "EndTimeIO" in statinfo:
-            endio = str(statinfo["EndTimeIO"]).strip('% \n\r')
-
-        if "StartTimeIO" in statinfo:
-            startio = str(statinfo["StartTimeIO"]).strip('% \n\r')
-
         if "Started" in statinfo:
-            started = str(statinfo["Started"]).strip('% \n\r')
+            started = stat_to_float(statinfo["Started"])
 
         if "Ended" in statinfo:
-            ended = str(statinfo["Ended"]).strip('% \n\r')
+            ended = stat_to_float(statinfo["Ended"])
 
-        disk_io = int(endio) - int(startio)
+        if "Child rusage ru_utime" in statinfo:
+            cpu_time_user = cpu_time_user + stat_to_float(statinfo["Child rusage ru_utime"])
 
-        elapsed_time = float(ended) - float(started)
+        if "Child rusage ru_stime" in statinfo:
+            cpu_time_system = cpu_time_system + stat_to_float(statinfo["Child rusage ru_stime"])
 
-        cpu_usage = float(cpu_usage)
+        if "IO write_bytes" in statinfo:
+            write_bytes = int(statinfo["IO write_bytes"].strip('% \n\r'))
+            disk_io_write = disk_io_write + write_bytes
 
-        return {'cpu_usage': cpu_usage, 'disk_io': disk_io, 'elapsed_time': elapsed_time}
+        if "IO read_bytes" in statinfo:
+            read_bytes = int(statinfo["IO read_bytes"].strip('% \n\r'))
+            disk_io_read = disk_io_read + read_bytes
 
+        return {
+            'stat_file': filename,
+            'cpu_time_user': cpu_time_user,
+            'cpu_time_system': cpu_time_system,
+            'disk_io_read': disk_io_read,
+            'disk_io_write': disk_io_write,
+            'started': started,
+            'ended': ended
+        }
 
     if isinstance(e, (bb.build.TaskSucceeded, bb.build.TaskFailed)):
         _append_read_list(e)
         pass
 
-
-    if isinstance(e, bb.event.BuildCompleted) and os.path.exists(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist")):
+    if isinstance(e, bb.event.BuildCompleted) and os.path.exists(toaster_statlist_file):
         events = []
-        with open(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"), "r") as fin:
+        with open(toaster_statlist_file, "r") as fin:
             for line in fin:
                 (taskfile, taskname, filename, recipename) = line.strip().split("::")
-                events.append((taskfile, taskname, _read_stats(filename), recipename))
+                stats = _read_stats(filename)
+                events.append((taskfile, taskname, stats, recipename))
         bb.event.fire(bb.event.MetadataEvent("BuildStatsList", events), e.data)
-        os.unlink(os.path.join(e.data.getVar('BUILDSTATS_BASE', True), "toasterstatlist"))
+        os.unlink(toaster_statlist_file)
 }
 
 # dump relevant build history data as an event when the build is completed
@@ -265,6 +297,7 @@
     allpkgs = {}
     files = {}
     for target in e._pkgs:
+        target = target.split(':')[0] # strip ':<task>' suffix from the target
         installed_img_path = e.data.expand(os.path.join(BUILDHISTORY_DIR_IMAGE_BASE, target))
         if os.path.exists(installed_img_path):
             images[target] = {}
@@ -347,9 +380,18 @@
 
 addhandler toaster_buildhistory_dump
 toaster_buildhistory_dump[eventmask] = "bb.event.BuildCompleted"
+
+do_packagedata_setscene[postfuncs] += "toaster_package_dumpdata "
+do_packagedata_setscene[vardepsexclude] += "toaster_package_dumpdata "
+
 do_package[postfuncs] += "toaster_package_dumpdata "
 do_package[vardepsexclude] += "toaster_package_dumpdata "
 
-do_rootfs[postfuncs] += "toaster_image_dumpdata "
+do_image_complete[postfuncs] += "toaster_image_dumpdata "
+do_image_complete[vardepsexclude] += "toaster_image_dumpdata "
+
 do_rootfs[postfuncs] += "toaster_licensemanifest_dump "
-do_rootfs[vardepsexclude] += "toaster_image_dumpdata toaster_licensemanifest_dump"
+do_rootfs[vardepsexclude] += "toaster_licensemanifest_dump "
+
+do_populate_sdk[postfuncs] += "toaster_artifact_dumpdata "
+do_populate_sdk[vardepsexclude] += "toaster_artifact_dumpdata "
diff --git a/yocto-poky/meta/classes/toolchain-scripts.bbclass b/yocto-poky/meta/classes/toolchain-scripts.bbclass
index ab4feb0..2e2c93a 100644
--- a/yocto-poky/meta/classes/toolchain-scripts.bbclass
+++ b/yocto-poky/meta/classes/toolchain-scripts.bbclass
@@ -9,6 +9,9 @@
 # This function creates an environment-setup-script for use in a deployable SDK
 toolchain_create_sdk_env_script () {
 	# Create environment setup script
+	base_sbindir=${10:-${base_sbindir_nativesdk}}
+	base_bindir=${9:-${base_bindir_nativesdk}}
+	sbindir=${8:-${sbindir_nativesdk}}
 	sdkpathnative=${7:-${SDKPATHNATIVE}}
 	prefix=${6:-${prefix_nativesdk}}
 	bindir=${5:-${bindir_nativesdk}}
@@ -23,7 +26,7 @@
 	for i in ${CANADIANEXTRAOS}; do
 		EXTRAPATH="$EXTRAPATH:$sdkpathnative$bindir/${TARGET_ARCH}${TARGET_VENDOR}-$i"
 	done
-	echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
+	echo "export PATH=$sdkpathnative$bindir:$sdkpathnative$sbindir:$sdkpathnative$base_bindir:$sdkpathnative$base_sbindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$PATH' >> $script
 	echo "export CCACHE_PATH=$sdkpathnative$bindir:$sdkpathnative$bindir/../${HOST_SYS}/bin:$sdkpathnative$bindir/${TARGET_SYS}"$EXTRAPATH':$CCACHE_PATH' >> $script
 	echo 'export PKG_CONFIG_SYSROOT_DIR=$SDKTARGETSYSROOT' >> $script
 	echo 'export PKG_CONFIG_PATH=$SDKTARGETSYSROOT'"$libdir"'/pkgconfig' >> $script
@@ -31,7 +34,6 @@
 	echo "export OECORE_NATIVE_SYSROOT=\"$sdkpathnative\"" >> $script
 	echo 'export OECORE_TARGET_SYSROOT="$SDKTARGETSYSROOT"' >> $script
 	echo "export OECORE_ACLOCAL_OPTS=\"-I $sdkpathnative/usr/share/aclocal\"" >> $script
-	echo "export PYTHONHOME=$sdkpathnative$prefix" >> $script
 	echo 'unset command_not_found_handle' >> $script
 
 	toolchain_shared_env_script
diff --git a/yocto-poky/meta/classes/typecheck.bbclass b/yocto-poky/meta/classes/typecheck.bbclass
index 72da932..6bff7c7 100644
--- a/yocto-poky/meta/classes/typecheck.bbclass
+++ b/yocto-poky/meta/classes/typecheck.bbclass
@@ -5,7 +5,7 @@
 python check_types() {
     import oe.types
     for key in e.data.keys():
-        if e.data.getVarFlag(key, "type"):
+        if e.data.getVarFlag(key, "type", True):
             oe.data.typed_value(key, e.data)
 }
 addhandler check_types
diff --git a/yocto-poky/meta/classes/uninative.bbclass b/yocto-poky/meta/classes/uninative.bbclass
index 0cd27db..89cec07 100644
--- a/yocto-poky/meta/classes/uninative.bbclass
+++ b/yocto-poky/meta/classes/uninative.bbclass
@@ -1,20 +1,107 @@
-NATIVELSBSTRING = "universal"
+UNINATIVE_LOADER ?= "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/lib/${@bb.utils.contains('BUILD_ARCH', 'x86_64', 'ld-linux-x86-64.so.2', 'ld-linux.so.2', d)}"
 
-UNINATIVE_LOADER ?= "${@bb.utils.contains('BUILD_ARCH', 'x86_64', '${STAGING_DIR_NATIVE}/lib/ld-linux-x86-64.so.2', '${STAGING_DIR_NATIVE}/lib/ld-linux.so.2', d)}"
+UNINATIVE_URL ?= "unset"
+UNINATIVE_TARBALL ?= "${BUILD_ARCH}-nativesdk-libc.tar.bz2"
+# Example checksums
+#UNINATIVE_CHECKSUM[i586] = "dead"
+#UNINATIVE_CHECKSUM[x86_64] = "dead"
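+# A distro or local configuration would then provide the real values, e.g.
+# (URL below is only a placeholder):
+#UNINATIVE_URL = "http://example.com/releases/uninative/1.0/"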
+UNINATIVE_DLDIR ?= "${DL_DIR}/uninative/"
 
-addhandler uninative_eventhandler
-uninative_eventhandler[eventmask] = "bb.event.BuildStarted"
+# https://wiki.debian.org/GCC5
+# We may see binaries built with gcc5 run in, or linked into, a gcc4
+# environment, so use the older libstdc++ ABI for now until we no longer
+# support gcc4 on the host system.
+BUILD_CXXFLAGS_append = " -D_GLIBCXX_USE_CXX11_ABI=0"
 
-python uninative_eventhandler() {
-    loader = e.data.getVar("UNINATIVE_LOADER", True)
-    if not os.path.exists(loader):
-        import subprocess
-        cmd = e.data.expand("mkdir -p ${STAGING_DIR}; cd ${STAGING_DIR}; tar -xjf ${COREBASE}/${BUILD_ARCH}-nativesdk-libc.tar.bz2; ${STAGING_DIR}/relocate_sdk.py ${STAGING_DIR_NATIVE} ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_BINDIR_NATIVE}/patchelf-uninative")
-        #bb.warn("nativesdk lib extraction: " + cmd)
+#
+# icu's configure defaults to C++11 if no -std= option is passed in CXXFLAGS,
+# therefore pass one explicitly.
+BUILD_CXXFLAGS_append_pn-icu-native = " -std=c++98"
+
+addhandler uninative_event_fetchloader
+uninative_event_fetchloader[eventmask] = "bb.event.BuildStarted"
+
+addhandler uninative_event_enable
+uninative_event_enable[eventmask] = "bb.event.ConfigParsed"
+
+python uninative_event_fetchloader() {
+    """
+    This event fires on the parent and will try to fetch the tarball if the
+    loader isn't already present.
+    """
+
+    chksum = d.getVarFlag("UNINATIVE_CHECKSUM", d.getVar("BUILD_ARCH", True), True)
+    if not chksum:
+        bb.fatal("Uninative selected but not configured correctly, please set UNINATIVE_CHECKSUM[%s]" % d.getVar("BUILD_ARCH", True))
+
+    loader = d.getVar("UNINATIVE_LOADER", True)
+    loaderchksum = loader + ".chksum"
+    if os.path.exists(loader) and os.path.exists(loaderchksum):
+        with open(loaderchksum, "r") as f:
+            readchksum = f.read().strip()
+        if readchksum == chksum:
+            return
+
+    import subprocess
+    try:
+        # Save and restore cwd as Fetch.download() does a chdir()
+        olddir = os.getcwd()
+
+        tarball = d.getVar("UNINATIVE_TARBALL", True)
+        tarballdir = os.path.join(d.getVar("UNINATIVE_DLDIR", True), chksum)
+        tarballpath = os.path.join(tarballdir, tarball)
+
+        if not os.path.exists(tarballpath):
+            bb.utils.mkdirhier(tarballdir)
+            if d.getVar("UNINATIVE_URL", True) == "unset":
+                bb.fatal("Uninative selected but not configured, please set UNINATIVE_URL")
+
+            localdata = bb.data.createCopy(d)
+            localdata.setVar('FILESPATH', "")
+            localdata.setVar('DL_DIR', tarballdir)
+
+            srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum)
+            bb.note("Fetching uninative binary shim from %s" % srcuri)
+
+            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
+            fetcher.download()
+            localpath = fetcher.localpath(srcuri)
+            if localpath != tarballpath and os.path.exists(localpath) and not os.path.exists(tarballpath):
+                os.symlink(localpath, tarballpath)
+
+        cmd = d.expand("mkdir -p ${STAGING_DIR}-uninative; cd ${STAGING_DIR}-uninative; tar -xjf ${UNINATIVE_DLDIR}/%s/${UNINATIVE_TARBALL}; ${STAGING_DIR}-uninative/relocate_sdk.py ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux ${UNINATIVE_LOADER} ${UNINATIVE_LOADER} ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux/${bindir_native}/patchelf-uninative ${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${base_libdir_native}/libc*.so" % chksum)
         subprocess.check_call(cmd, shell=True)
+
+        with open(loaderchksum, "w") as f:
+            f.write(chksum)
+
+        enable_uninative(d)
+
+    except bb.fetch2.BBFetchException as exc:
+        bb.warn("Disabling uninative as unable to fetch uninative tarball: %s" % str(exc))
+        bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
+    except subprocess.CalledProcessError as exc:
+        bb.warn("Disabling uninative as unable to install uninative tarball: %s" % str(exc))
+        bb.warn("To build your own uninative loader, please bitbake uninative-tarball and set UNINATIVE_TARBALL appropriately.")
+    finally:
+        os.chdir(olddir)
 }
 
-SSTATEPOSTUNPACKFUNCS_append = " uninative_changeinterp"
+python uninative_event_enable() {
+    """
+    This event handler is called in the workers and is responsible for setting
+    up uninative if a loader is found.
+    """
+    enable_uninative(d)
+}
+
+def enable_uninative(d):
+    loader = d.getVar("UNINATIVE_LOADER", True)
+    if os.path.exists(loader):
+        bb.debug(2, "Enabling uninative")
+        d.setVar("NATIVELSBSTRING", "universal")
+        d.appendVar("SSTATEPOSTUNPACKFUNCS", " uninative_changeinterp")
+        d.prependVar("PATH", "${STAGING_DIR}-uninative/${BUILD_ARCH}-linux${bindir_native}:")
 
 python uninative_changeinterp () {
     import subprocess
@@ -27,6 +114,8 @@
     sstateinst = d.getVar('SSTATE_INSTDIR', True)
     for walkroot, dirs, files in os.walk(sstateinst):
         for file in files:
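+            # Shared libraries have no program interpreter to rewrite, so skip them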
+            if file.endswith(".so") or ".so." in file:
+                continue
             f = os.path.join(walkroot, file)
             if os.path.islink(f):
                 continue
@@ -36,9 +125,16 @@
             elf = oe.qa.ELFFile(f)
             try:
                 elf.open()
-            except:
+            except oe.qa.NotELFFileError:
+                continue
+            if not elf.isDynamic():
                 continue
 
-            #bb.warn("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f))
-            subprocess.call("patchelf-uninative --set-interpreter %s %s" % (d.getVar("UNINATIVE_LOADER", True), f), shell=True)
+            try:
+                subprocess.check_output(("patchelf-uninative", "--set-interpreter",
+                                         d.getVar("UNINATIVE_LOADER", True), f),
+                                        stderr=subprocess.STDOUT)
+            except subprocess.CalledProcessError as e:
+                bb.fatal("'%s' failed with exit code %d and the following output:\n%s" %
+                         (e.cmd, e.returncode, e.output))
 }
diff --git a/yocto-poky/meta/classes/update-alternatives.bbclass b/yocto-poky/meta/classes/update-alternatives.bbclass
index a3c1657..70a8185 100644
--- a/yocto-poky/meta/classes/update-alternatives.bbclass
+++ b/yocto-poky/meta/classes/update-alternatives.bbclass
@@ -61,7 +61,7 @@
 ALTERNATIVE_PRIORITY = "10"
 
 # We need special processing for vardeps because it can not work on
-# modified flag values.  So we agregate the flags into a new variable
+# modified flag values.  So we aggregate the flags into a new variable
 # and include that variable in the set.
 UPDALTVARS  = "ALTERNATIVE ALTERNATIVE_LINK_NAME ALTERNATIVE_TARGET ALTERNATIVE_PRIORITY"
 
@@ -252,7 +252,7 @@
             alt_target   = alt_target or d.getVar('ALTERNATIVE_TARGET_%s' % pkg, True) or d.getVar('ALTERNATIVE_TARGET', True) or alt_link
 
             if alt_link == alt_target:
-                bb.warn('alt_link == alt_target: %s == %s' % (alt_link, alt_target))
+                bb.warn('%s: alt_link == alt_target: %s == %s' % (pn, alt_link, alt_target))
                 alt_target = '%s.%s' % (alt_target, pn)
 
             if not os.path.lexists('%s/%s/%s' % (pkgdest, pkg, alt_target)):
diff --git a/yocto-poky/meta/classes/upstream-version-is-even.bbclass b/yocto-poky/meta/classes/upstream-version-is-even.bbclass
new file mode 100644
index 0000000..89556ed
--- /dev/null
+++ b/yocto-poky/meta/classes/upstream-version-is-even.bbclass
@@ -0,0 +1,5 @@
+# This class ensures that the upstream version check only
+# accepts even minor versions (e.g. 3.0.x, 3.2.x, 3.4.x, etc.).
+# This scheme is used by GNOME and a number of other projects
+# to signify stable releases vs. development releases.
+UPSTREAM_CHECK_REGEX = "(?P<pver>\d+\.(\d*[02468])+(\.\d+)+)"
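+# For example, 3.20.1 matches the regex above while 3.21.1 does not.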
diff --git a/yocto-poky/meta/classes/useradd-staticids.bbclass b/yocto-poky/meta/classes/useradd-staticids.bbclass
index 924d6ea..a9b506d 100644
--- a/yocto-poky/meta/classes/useradd-staticids.bbclass
+++ b/yocto-poky/meta/classes/useradd-staticids.bbclass
@@ -22,6 +22,30 @@
         and return it as a list"""
         return list(itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length))
 
+    def merge_files(file_list, exp_fields):
+        """Read each passwd/group file in file_list, split each line and create
+        a dictionary with the user/group names as keys and the split lines as
+        values. If the user/group name already exists in the dictionary, then
+        update any fields in the list with the values from the new list (if they
+        are set)."""
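+        # Illustrative example (names and ids are made up): merging "joe::2000"
+        # on top of an earlier "joe:x:1000:1000::/home/joe:/bin/sh" yields
+        # "joe:x:2000:1000::/home/joe:/bin/sh" - later non-empty fields win.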
+        id_table = dict()
+        for conf in file_list.split():
+            if os.path.exists(conf):
+                f = open(conf, "r")
+                for line in f:
+                    if line.startswith('#'):
+                        continue
+                    # Make sure there always are at least exp_fields elements in
+                    # the field list. This allows for leaving out trailing
+                    # colons in the files.
+                    fields = list_extend(line.rstrip().split(":"), exp_fields)
+                    if fields[0] not in id_table:
+                        id_table[fields[0]] = fields
+                    else:
+                        id_table[fields[0]] = list(itertools.imap(lambda x, y: x or y, fields, id_table[fields[0]]))
+
+        return id_table
+
     # We parse and rewrite the useradd components
     def rewrite_useradd(params):
         # The following comes from --help on useradd from shadow
@@ -37,21 +61,21 @@
         parser.add_argument("-k", "--skel", metavar="SKEL_DIR", help="use this alternative skeleton directory")
         parser.add_argument("-K", "--key", metavar="KEY=VALUE", help="override /etc/login.defs defaults")
         parser.add_argument("-l", "--no-log-init", help="do not add the user to the lastlog and faillog databases", action="store_true")
-        parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_true")
-        parser.add_argument("-M", "--no-create-home", help="do not create the user's home directory", action="store_true")
-        parser.add_argument("-N", "--no-user-group", help="do not create a group with the same name as the user", action="store_true")
+        parser.add_argument("-m", "--create-home", help="create the user's home directory", action="store_const", const=True)
+        parser.add_argument("-M", "--no-create-home", dest="create_home", help="do not create the user's home directory", action="store_const", const=False)
+        parser.add_argument("-N", "--no-user-group", dest="user_group", help="do not create a group with the same name as the user", action="store_const", const=False)
         parser.add_argument("-o", "--non-unique", help="allow to create users with duplicate (non-unique UID)", action="store_true")
         parser.add_argument("-p", "--password", metavar="PASSWORD", help="encrypted password of the new account")
         parser.add_argument("-R", "--root", metavar="CHROOT_DIR", help="directory to chroot into")
         parser.add_argument("-r", "--system", help="create a system account", action="store_true")
         parser.add_argument("-s", "--shell", metavar="SHELL", help="login shell of the new account")
         parser.add_argument("-u", "--uid", metavar="UID", help="user ID of the new account")
-        parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_true")
+        parser.add_argument("-U", "--user-group", help="create a group with the same name as the user", action="store_const", const=True)
         parser.add_argument("LOGIN", help="Login name of the new user")
 
         # Return a list of configuration files based on either the default
         # files/passwd or the contents of USERADD_UID_TABLES
-        # paths are resulved via BBPATH
+        # paths are resolved via BBPATH
         def get_passwd_list(d):
             str = ""
             bbpath = d.getVar('BBPATH', True)
@@ -63,6 +87,7 @@
             return str
 
         newparams = []
+        users = None
         for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
             param = param.strip()
             if not param:
@@ -72,10 +97,9 @@
             except:
                 raise bb.build.FuncFailed("%s: Unable to parse arguments for USERADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
 
-            # files/passwd or the contents of USERADD_UID_TABLES
+            # Read all passwd files specified in USERADD_UID_TABLES or files/passwd
             # Use the standard passwd layout:
             #  username:password:user_id:group_id:comment:home_directory:login_shell
-            # (we want to process in reverse order, as 'last found' in the list wins)
             #
             # If a field is left blank, the original value will be used.  The 'username'
             # field is required.
@@ -84,65 +108,57 @@
             # in the useradd command may introduce a security hole.  It's assumed that
             # all new users get the default ('*' which prevents login) until the user is
             # specifically configured by the system admin.
-            for conf in get_passwd_list(d).split()[::-1]:
-                if os.path.exists(conf):
-                    f = open(conf, "r")
-                    for line in f:
-                        if line.startswith('#'):
-                            continue
-                        # Make sure there always are at least seven elements in
-                        # the field list. This allows for leaving out trailing
-                        # colons in the passwd file.
-                        field = list_extend(line.rstrip().split(":"), 7)
-                        if field[0] == uaargs.LOGIN:
-                            if uaargs.uid and field[2] and (uaargs.uid != field[2]):
-                                bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
-                            uaargs.uid = [field[2], uaargs.uid][not field[2]]
+            if not users:
+                users = merge_files(get_passwd_list(d), 7)
 
-                            # Determine the possible groupname
-                            # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
-                            #
-                            # By default the system has creation of the matching groups enabled
-                            # So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
-                            # is used, and we disable the user_group option.
-                            #
-                            uaargs.groupname = [uaargs.gid, uaargs.LOGIN][not uaargs.gid or uaargs.user_group]
-                            uaargs.groupid = [uaargs.gid, uaargs.groupname][not uaargs.gid]
-                            uaargs.groupid = [field[3], uaargs.groupid][not field[3]]
+            if uaargs.LOGIN not in users:
+                continue
 
-                            if not uaargs.gid or uaargs.gid != uaargs.groupid:
-                                if (uaargs.groupid and uaargs.groupid.isdigit()) and (uaargs.groupname and uaargs.groupname.isdigit()) and (uaargs.groupid != uaargs.groupname):
-                                    # We want to add a group, but we don't know it's name... so we can't add the group...
-                                    # We have to assume the group has previously been added or we'll fail on the adduser...
-                                    # Note: specifying the actual gid is very rare in OE, usually the group name is specified.
-                                    bb.warn("%s: Changing gid for login %s from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupname, uaargs.gid))
-                                elif (uaargs.groupid and not uaargs.groupid.isdigit()) and uaargs.groupid == uaargs.groupname:
-                                    # We don't have a number, so we have to add a name
-                                    bb.debug(1, "Adding group %s!" % (uaargs.groupname))
-                                    uaargs.gid = uaargs.groupid
-                                    uaargs.user_group = False
-                                    groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
-                                    newgroup = "%s %s" % (['', ' --system'][uaargs.system], uaargs.groupname)
-                                    if groupadd:
-                                        d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup))
-                                    else:
-                                        d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
-                                elif uaargs.groupname and (uaargs.groupid and uaargs.groupid.isdigit()):
-                                    # We have a group name and a group number to assign it to
-                                    bb.debug(1, "Adding group %s  gid (%s)!" % (uaargs.groupname, uaargs.groupid))
-                                    uaargs.gid = uaargs.groupid
-                                    uaargs.user_group = False
-                                    groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
-                                    newgroup = "-g %s %s" % (uaargs.gid, uaargs.groupname)
-                                    if groupadd:
-                                        d.setVar("GROUPADD_PARAM_%s" % pkg, "%s ; %s" % (groupadd, newgroup))
-                                    else:
-                                        d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+            field = users[uaargs.LOGIN]
 
-                            uaargs.comment = ["'%s'" % field[4], uaargs.comment][not field[4]]
-                            uaargs.home_dir = [field[5], uaargs.home_dir][not field[5]]
-                            uaargs.shell = [field[6], uaargs.shell][not field[6]]
-                            break
+            if uaargs.uid and field[2] and (uaargs.uid != field[2]):
+                bb.warn("%s: Changing username %s's uid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.uid, field[2]))
+            uaargs.uid = field[2] or uaargs.uid
+
+            # Determine the possible groupname
+            # Unless the group name (or gid) is specified, we assume that the LOGIN is the groupname
+            #
+            # By default the system has creation of the matching groups enabled
+            # So if the implicit username-group creation is on, then the implicit groupname (LOGIN)
+            # is used, and we disable the user_group option.
+            #
+            user_group = uaargs.user_group is None or uaargs.user_group is True
+            uaargs.groupname = uaargs.LOGIN if user_group else uaargs.gid
+            uaargs.groupid = field[3] or uaargs.gid or uaargs.groupname
+
+            if uaargs.groupid and uaargs.gid != uaargs.groupid:
+                newgroup = None
+                if not uaargs.groupid.isdigit():
+                    # We don't have a group number, so we have to add a name
+                    bb.debug(1, "Adding group %s!" % uaargs.groupid)
+                    newgroup = "%s %s" % (' --system' if uaargs.system else '', uaargs.groupid)
+                elif uaargs.groupname and not uaargs.groupname.isdigit():
+                    # We have a group name and a group number to assign it to
+                    bb.debug(1, "Adding group %s (gid %s)!" % (uaargs.groupname, uaargs.groupid))
+                    newgroup = "-g %s %s" % (uaargs.groupid, uaargs.groupname)
+                else:
+                    # We want to add a group, but we don't know its name... so we can't add the group...
+                    # We have to assume the group has previously been added or we'll fail on the adduser...
+                    # Note: specifying the actual gid is very rare in OE, usually the group name is specified.
+                    bb.warn("%s: Changing gid for login %s to %s, verify configuration files!" % (d.getVar('PN', True), uaargs.LOGIN, uaargs.groupid))
+
+                uaargs.gid = uaargs.groupid
+                uaargs.user_group = None
+                if newgroup:
+                    groupadd = d.getVar("GROUPADD_PARAM_%s" % pkg, True)
+                    if groupadd:
+                        d.setVar("GROUPADD_PARAM_%s" % pkg, "%s; %s" % (groupadd, newgroup))
+                    else:
+                        d.setVar("GROUPADD_PARAM_%s" % pkg, newgroup)
+
+            uaargs.comment = "'%s'" % field[4] if field[4] else uaargs.comment
+            uaargs.home_dir = field[5] or uaargs.home_dir
+            uaargs.shell = field[6] or uaargs.shell
 
             # Should be an error if a specific option is set...
             if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not ((uaargs.uid and uaargs.uid.isdigit()) and uaargs.gid):
@@ -161,21 +177,21 @@
             newparam += ['', ' --skel %s' % uaargs.skel][uaargs.skel != None]
             newparam += ['', ' --key %s' % uaargs.key][uaargs.key != None]
             newparam += ['', ' --no-log-init'][uaargs.no_log_init]
-            newparam += ['', ' --create-home'][uaargs.create_home]
-            newparam += ['', ' --no-create-home'][uaargs.no_create_home]
-            newparam += ['', ' --no-user-group'][uaargs.no_user_group]
+            newparam += ['', ' --create-home'][uaargs.create_home is True]
+            newparam += ['', ' --no-create-home'][uaargs.create_home is False]
+            newparam += ['', ' --no-user-group'][uaargs.user_group is False]
             newparam += ['', ' --non-unique'][uaargs.non_unique]
             newparam += ['', ' --password %s' % uaargs.password][uaargs.password != None]
             newparam += ['', ' --root %s' % uaargs.root][uaargs.root != None]
             newparam += ['', ' --system'][uaargs.system]
             newparam += ['', ' --shell %s' % uaargs.shell][uaargs.shell != None]
             newparam += ['', ' --uid %s' % uaargs.uid][uaargs.uid != None]
-            newparam += ['', ' --user-group'][uaargs.user_group]
+            newparam += ['', ' --user-group'][uaargs.user_group is True]
             newparam += ' %s' % uaargs.LOGIN
 
             newparams.append(newparam)
 
-        return " ;".join(newparams).strip()
+        return ";".join(newparams).strip()
 
     # We parse and rewrite the groupadd components
     def rewrite_groupadd(params):
@@ -192,7 +208,7 @@
 
         # Return a list of configuration files based on either the default
         # files/group or the contents of USERADD_GID_TABLES
-        # paths are resulved via BBPATH
+        # paths are resolved via BBPATH
         def get_group_list(d):
             str = ""
             bbpath = d.getVar('BBPATH', True)
@@ -204,6 +220,7 @@
             return str
 
         newparams = []
+        groups = None
         for param in re.split('''[ \t]*;[ \t]*(?=(?:[^'"]|'[^']*'|"[^"]*")*$)''', params):
             param = param.strip()
             if not param:
@@ -214,7 +231,7 @@
             except:
                 raise bb.build.FuncFailed("%s: Unable to parse arguments for GROUPADD_PARAM_%s: '%s'" % (d.getVar('PN', True), pkg, param))
 
-            # Need to iterate over layers and open the right file(s)
+            # Read all group files specified in USERADD_GID_TABLES or files/group
             # Use the standard group layout:
             #  groupname:password:group_id:group_members
             #
@@ -223,21 +240,18 @@
             #
             # Note: similar to the passwd file, the 'password' field is ignored
             # Note: group_members is ignored, group members must be configured with the GROUPMEMS_PARAM
-            for conf in get_group_list(d).split()[::-1]:
-                if os.path.exists(conf):
-                    f = open(conf, "r")
-                    for line in f:
-                        if line.startswith('#'):
-                            continue
-                        # Make sure there always are at least four elements in
-                        # the field list. This allows for leaving out trailing
-                        # colons in the group file.
-                        field = list_extend(line.rstrip().split(":"), 4)
-                        if field[0] == gaargs.GROUP and field[2]:
-                            if gaargs.gid and (gaargs.gid != field[2]):
-                                bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
-                            gaargs.gid = field[2]
-                            break
+            if not groups:
+                groups = merge_files(get_group_list(d), 4)
+
+            if gaargs.GROUP not in groups:
+                continue
+
+            field = groups[gaargs.GROUP]
+
+            if field[2]:
+                if gaargs.gid and (gaargs.gid != field[2]):
+                    bb.warn("%s: Changing groupname %s's gid from (%s) to (%s), verify configuration files!" % (d.getVar('PN', True), gaargs.GROUP, gaargs.gid, field[2]))
+                gaargs.gid = field[2]
 
             if d.getVar('USERADD_ERROR_DYNAMIC', True) == '1' and not (gaargs.gid and gaargs.gid.isdigit()):
                 #bb.error("Skipping recipe %s, package %s which adds groupname %s does not have a static gid defined." % (d.getVar('PN', True),  pkg, gaargs.GROUP))
@@ -255,7 +269,7 @@
 
             newparams.append(newparam)
 
-        return " ;".join(newparams).strip()
+        return ";".join(newparams).strip()
 
     # Load and process the users and groups, rewriting the adduser/addgroup params
     useradd_packages = d.getVar('USERADD_PACKAGES', True)
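The rewritten hunks above replace the per-file reverse scan with a single lookup table built by merge_files(), a helper defined earlier in useradd-staticids.bbclass and not shown in this hunk. The sketch below outlines the behaviour the new code relies on, assuming entries from files later in the BBPATH-resolved list overwrite earlier ones (the same "last found wins" rule the old loop implemented by iterating in reverse); list_extend() here is only an approximation of the class's own padding helper.

    import os

    def list_extend(lst, length, obj=None):
        # Approximation of the class's padding helper: pad to 'length' fields
        # so trailing colons may be omitted in the passwd/group table files.
        return lst + [obj] * (length - len(lst))

    def merge_files(file_list, fields):
        # file_list is the whitespace-separated output of get_passwd_list()/
        # get_group_list(); fields is 7 for passwd-style files, 4 for group files.
        entries = {}
        for conf in file_list.split():
            if not os.path.exists(conf):
                continue
            with open(conf, "r") as f:
                for line in f:
                    if line.startswith('#'):
                        continue
                    field = list_extend(line.rstrip().split(":"), fields)
                    if field[0]:
                        # Later files overwrite earlier entries for the same name.
                        entries[field[0]] = field
        return entries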
diff --git a/yocto-poky/meta/classes/useradd.bbclass b/yocto-poky/meta/classes/useradd.bbclass
index 4577e56..ee402ac 100644
--- a/yocto-poky/meta/classes/useradd.bbclass
+++ b/yocto-poky/meta/classes/useradd.bbclass
@@ -50,14 +50,14 @@
 
 # Perform group additions first, since user additions may depend
 # on these groups existing
-if test "x$GROUPADD_PARAM" != "x"; then
+if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
 	echo "Running groupadd commands..."
 	# Invoke multiple instances of groupadd for parameter lists
 	# separated by ';'
 	opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
 	remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
 	while test "x$opts" != "x"; do
-		perform_groupadd "$SYSROOT" "$OPT $opts" 10
+		perform_groupadd "$SYSROOT" "$OPT $opts"
 		if test "x$opts" = "x$remaining"; then
 			break
 		fi
@@ -66,14 +66,14 @@
 	done
 fi 
 
-if test "x$USERADD_PARAM" != "x"; then
+if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
 	echo "Running useradd commands..."
 	# Invoke multiple instances of useradd for parameter lists
 	# separated by ';'
 	opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
 	remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
 	while test "x$opts" != "x"; do
-		perform_useradd "$SYSROOT" "$OPT $opts" 10
+		perform_useradd "$SYSROOT" "$OPT $opts"
 		if test "x$opts" = "x$remaining"; then
 			break
 		fi
@@ -82,14 +82,14 @@
 	done
 fi
 
-if test "x$GROUPMEMS_PARAM" != "x"; then
+if test "x`echo $GROUPMEMS_PARAM | tr -d '[:space:]'`" != "x"; then
 	echo "Running groupmems commands..."
 	# Invoke multiple instances of groupmems for parameter lists
 	# separated by ';'
 	opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
 	remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
 	while test "x$opts" != "x"; do
-		perform_groupmems "$SYSROOT" "$OPT $opts" 10
+		perform_groupmems "$SYSROOT" "$OPT $opts"
 		if test "x$opts" = "x$remaining"; then
 			break
 		fi
@@ -127,6 +127,35 @@
 	fi
 }
 
+userdel_sysroot_sstate () {
+if test "x${STAGING_DIR_TARGET}" != "x"; then
+    if [ "${BB_CURRENTTASK}" = "configure" -o "${BB_CURRENTTASK}" = "clean" ]; then
+        export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
+        OPT="--root ${STAGING_DIR_TARGET}"
+
+        # Remove groups and users defined for package
+        GROUPADD_PARAM="${@get_all_cmd_params(d, 'groupadd')}"
+        USERADD_PARAM="${@get_all_cmd_params(d, 'useradd')}"
+
+        if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
+            user=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+            perform_userdel "${STAGING_DIR_TARGET}" "$OPT $user"
+        fi
+
+        if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
+            group=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | awk '{ print $NF }'`
+            perform_groupdel "${STAGING_DIR_TARGET}" "$OPT $group"
+        fi
+
+    fi
+fi
+}
+
+SSTATECLEANFUNCS = "userdel_sysroot_sstate"
+SSTATECLEANFUNCS_class-cross = ""
+SSTATECLEANFUNCS_class-native = ""
+SSTATECLEANFUNCS_class-nativesdk = ""
+
 do_install[prefuncs] += "${SYSROOTFUNC}"
 SYSROOTFUNC = "useradd_sysroot"
 SYSROOTFUNC_class-cross = ""
diff --git a/yocto-poky/meta/classes/useradd_base.bbclass b/yocto-poky/meta/classes/useradd_base.bbclass
index ab3cd35..0d81acc 100644
--- a/yocto-poky/meta/classes/useradd_base.bbclass
+++ b/yocto-poky/meta/classes/useradd_base.bbclass
@@ -4,7 +4,7 @@
 
 # The following functions basically have similar logic.
 # *) Perform necessary checks before invoking the actual command
-# *) Invoke the actual command, make retries if necessary
+# *) Invoke the actual command with flock
 # *) Error out if an error occurs.
 
 # Note that before invoking these functions, make sure the global variable
@@ -13,26 +13,16 @@
 perform_groupadd () {
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing groupadd with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing groupadd with [$opts]"
 	local groupname=`echo "$opts" | awk '{ print $NF }'`
 	local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
 	if test "x$group_exists" = "x"; then
-		local count=0
-		while true; do
-			eval $PSEUDO groupadd $opts || true
-			group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
-			if test "x$group_exists" = "x"; then
-				bbwarn "${PN}: groupadd command did not succeed. Retrying..."
-			else
-				break
-			fi
-			count=`expr $count + 1`
-			if test $count = $retries; then
-				bbfatal "${PN}: Tried running groupadd command $retries times without success, giving up"
-			fi
-                        sleep $count
-		done
+		opts=`echo $opts | sed s/\'/\"/g`
+		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
+		group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+		if test "x$group_exists" = "x"; then
+			bbfatal "${PN}: groupadd command did not succeed."
+		fi
 	else
 		bbnote "${PN}: group $groupname already exists, not re-creating it"
 	fi
@@ -41,26 +31,16 @@
 perform_useradd () {
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing useradd with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing useradd with [$opts]"
 	local username=`echo "$opts" | awk '{ print $NF }'`
 	local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
 	if test "x$user_exists" = "x"; then
-	       local count=0
-	       while true; do
-		       eval $PSEUDO useradd $opts || true
-		       user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
-		       if test "x$user_exists" = "x"; then
-			       bbwarn "${PN}: useradd command did not succeed. Retrying..."
-		       else
-			       break
-		       fi
-		       count=`expr $count + 1`
-		       if test $count = $retries; then
-				bbfatal "${PN}: Tried running useradd command $retries times without success, giving up"
-		       fi
-		       sleep $count
-	       done
+		opts=`echo $opts | sed s/\'/\"/g`
+		eval flock -x $rootdir${sysconfdir} -c  \"$PSEUDO useradd \$opts\" || true
+		user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+		if test "x$user_exists" = "x"; then
+			bbfatal "${PN}: useradd command did not succeed."
+		fi
 	else
 		bbnote "${PN}: user $username already exists, not re-creating it"
 	fi
@@ -69,8 +49,7 @@
 perform_groupmems () {
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing groupmems with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing groupmems with [$opts]"
 	local groupname=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-g" || $i == "--group") print $(i+1) }'`
 	local username=`echo "$opts" | awk '{ for (i = 1; i < NF; i++) if ($i == "-a" || $i == "--add") print $(i+1) }'`
 	bbnote "${PN}: Running groupmems command with group $groupname and user $username"
@@ -84,25 +63,11 @@
 	fi
 	local mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
 	if test "x$mem_exists" = "x"; then
-		local count=0
-		while true; do
-			eval $PSEUDO groupmems $opts || true
-			mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
-			if test "x$mem_exists" = "x"; then
-				bbwarn "${PN}: groupmems command did not succeed. Retrying..."
-			else
-				break
-			fi
-			count=`expr $count + 1`
-			if test $count = $retries; then
-				if test "x$gshadow" = "xno"; then
-					rm -f $rootdir${sysconfdir}/gshadow
-					rm -f $rootdir${sysconfdir}/gshadow-
-				fi
-				bbfatal "${PN}: Tried running groupmems command $retries times without success, giving up"
-			fi
-			sleep $count
-		done
+		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmems \$opts\" || true
+		mem_exists="`grep "^$groupname:[^:]*:[^:]*:\([^,]*,\)*$username\(,[^,]*\)*" $rootdir/etc/group || true`"
+		if test "x$mem_exists" = "x"; then
+			bbfatal "${PN}: groupmems command did not succeed."
+		fi
 	else
 		bbnote "${PN}: group $groupname already contains $username, not re-adding it"
 	fi
@@ -115,26 +80,15 @@
 perform_groupdel () {
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing groupdel with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing groupdel with [$opts]"
 	local groupname=`echo "$opts" | awk '{ print $NF }'`
 	local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
 	if test "x$group_exists" != "x"; then
-		local count=0
-		while true; do
-			eval $PSEUDO groupdel $opts || true
-			group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
-			if test "x$group_exists" != "x"; then
-				bbwarn "${PN}: groupdel command did not succeed. Retrying..."
-			else
-				break
-			fi
-			count=`expr $count + 1`
-			if test $count = $retries; then
-				bbfatal "${PN}: Tried running groupdel command $retries times without success, giving up"
-			fi
-			sleep $count
-		done
+		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupdel \$opts\" || true
+		group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
+		if test "x$group_exists" != "x"; then
+			bbfatal "${PN}: groupdel command did not succeed."
+		fi
 	else
 		bbnote "${PN}: group $groupname doesn't exist, not removing it"
 	fi
@@ -143,26 +97,15 @@
 perform_userdel () {
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing userdel with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing userdel with [$opts]"
 	local username=`echo "$opts" | awk '{ print $NF }'`
 	local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
 	if test "x$user_exists" != "x"; then
-	       local count=0
-	       while true; do
-		       eval $PSEUDO userdel $opts || true
-		       user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
-		       if test "x$user_exists" != "x"; then
-			       bbwarn "${PN}: userdel command did not succeed. Retrying..."
-		       else
-			       break
-		       fi
-		       count=`expr $count + 1`
-		       if test $count = $retries; then
-				bbfatal "${PN}: Tried running userdel command $retries times without success, giving up"
-		       fi
-		       sleep $count
-	       done
+		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO userdel \$opts\" || true
+		user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
+		if test "x$user_exists" != "x"; then
+			bbfatal "${PN}: userdel command did not succeed."
+		fi
 	else
 		bbnote "${PN}: user $username doesn't exist, not removing it"
 	fi
@@ -174,25 +117,14 @@
 	set +e
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing groupmod with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing groupmod with [$opts]"
 	local groupname=`echo "$opts" | awk '{ print $NF }'`
 	local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
 	if test "x$group_exists" != "x"; then
-		local count=0
-		while true; do
-			eval $PSEUDO groupmod $opts
-			if test $? != 0; then
-				bbwarn "${PN}: groupmod command did not succeed. Retrying..."
-			else
-				break
-			fi
-			count=`expr $count + 1`
-			if test $count = $retries; then
-				bbfatal "${PN}: Tried running groupmod command $retries times without success, giving up"
-			fi
-			sleep $count
-		done
+		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupmod \$opts\"
+		if test $? != 0; then
+			bbwarn "${PN}: groupmod command did not succeed."
+		fi
 	else
 		bbwarn "${PN}: group $groupname doesn't exist, unable to modify it"
 	fi
@@ -204,25 +136,14 @@
 	set +e
 	local rootdir="$1"
 	local opts="$2"
-	local retries="$3"
-	bbnote "${PN}: Performing usermod with [$opts] and $retries times of retry"
+	bbnote "${PN}: Performing usermod with [$opts]"
 	local username=`echo "$opts" | awk '{ print $NF }'`
 	local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
 	if test "x$user_exists" != "x"; then
-	       local count=0
-	       while true; do
-		       eval $PSEUDO usermod $opts
-		       if test $? != 0; then
-			       bbwarn "${PN}: usermod command did not succeed. Retrying..."
-		       else
-			       break
-		       fi
-		       count=`expr $count + 1`
-		       if test $count = $retries; then
-				bbfatal "${PN}: Tried running usermod command $retries times without success, giving up"
-		       fi
-		       sleep $count
-	       done
+		eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO usermod \$opts\"
+		if test $? != 0; then
+			bbfatal "${PN}: usermod command did not succeed."
+		fi
 	else
 		bbwarn "${PN}: user $username doesn't exist, unable to modify it"
 	fi
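Throughout useradd_base.bbclass the retry-and-sleep loops are replaced by a single attempt serialized with flock(1) on $rootdir${sysconfdir}, so concurrent tasks no longer race on the same passwd/group files and a failure is reported immediately rather than after a fixed number of retries. A minimal Python illustration of the same locking pattern (the class itself shells out to flock as shown above; the function and paths here are hypothetical):

    import fcntl
    import os
    import subprocess

    def run_locked(lockdir, cmd):
        # Take an exclusive lock on the directory that holds passwd/group,
        # run the command, then release -- the serialization the shell code
        # gets from: flock -x $rootdir${sysconfdir} -c "$PSEUDO useradd ..."
        fd = os.open(lockdir, os.O_RDONLY)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX)
            return subprocess.call(cmd)
        finally:
            fcntl.flock(fd, fcntl.LOCK_UN)
            os.close(fd)

    # e.g. run_locked("/path/to/sysroot/etc", ["useradd", "--system", "myuser"])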
diff --git a/yocto-poky/meta/classes/utility-tasks.bbclass b/yocto-poky/meta/classes/utility-tasks.bbclass
index e817b89..5bcfd0b 100644
--- a/yocto-poky/meta/classes/utility-tasks.bbclass
+++ b/yocto-poky/meta/classes/utility-tasks.bbclass
@@ -4,12 +4,12 @@
     taskdescs = {}
     maxlen = 0
     for e in d.keys():
-        if d.getVarFlag(e, 'task'):
+        if d.getVarFlag(e, 'task', True):
             maxlen = max(maxlen, len(e))
             if e.endswith('_setscene'):
-                desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc') or '')
+                desc = "%s (setscene version)" % (d.getVarFlag(e[:-9], 'doc', True) or '')
             else:
-                desc = d.getVarFlag(e, 'doc') or ''
+                desc = d.getVarFlag(e, 'doc', True) or ''
             taskdescs[e] = desc
 
     tasks = sorted(taskdescs.keys())
@@ -43,11 +43,8 @@
     if len(src_uri) == 0:
         return
 
-    localdata = bb.data.createCopy(d)
-    bb.data.update_data(localdata)
-
     try:
-        fetcher = bb.fetch2.Fetch(src_uri, localdata)
+        fetcher = bb.fetch2.Fetch(src_uri, d)
         fetcher.checkstatus()
     except bb.fetch2.BBFetchException, e:
         raise bb.build.FuncFailed(e)
diff --git a/yocto-poky/meta/classes/vala.bbclass b/yocto-poky/meta/classes/vala.bbclass
index 9ff664a..615eb37 100644
--- a/yocto-poky/meta/classes/vala.bbclass
+++ b/yocto-poky/meta/classes/vala.bbclass
@@ -16,3 +16,9 @@
     ${datadir}/vala/vapi/*.deps \
     ${datadir}/gir-1.0 \
 "
+
+# Remove vapigen.m4 that is bundled with tarballs
+# because it does not yet have our cross-compile fixes
+do_configure_prepend() {
+        rm -f ${S}/m4/vapigen.m4
+}
diff --git a/yocto-poky/meta/classes/waf.bbclass b/yocto-poky/meta/classes/waf.bbclass
index 3a221e7..5e55833 100644
--- a/yocto-poky/meta/classes/waf.bbclass
+++ b/yocto-poky/meta/classes/waf.bbclass
@@ -1,9 +1,34 @@
+# avoids build breaks when using no-static-libs.inc
+DISABLE_STATIC = ""
+
+def get_waf_parallel_make(d):
+    pm = d.getVar('PARALLEL_MAKE', True)
+    if pm:
+        # look for '-j' and throw other options (e.g. '-l') away
+        # because they might have a different meaning in waf
+        pm = pm.split()
+        while pm:
+            v = None
+            opt = pm.pop(0)
+            if opt == '-j':
+                v = pm.pop(0)
+            elif opt.startswith('-j'):
+                v = opt[2:].strip()
+            else:
+                v = None
+
+            if v:
+                v = min(64, int(v))
+                return '-j' + str(v)
+
+    return ""
+
 waf_do_configure() {
 	${S}/waf configure --prefix=${prefix} ${EXTRA_OECONF}
 }
 
 waf_do_compile()  {
-	${S}/waf build ${PARALLEL_MAKE}
+	${S}/waf build ${@get_waf_parallel_make(d)}
 }
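The new get_waf_parallel_make() keeps only the -j job count from PARALLEL_MAKE (capped at 64) and drops everything else, since flags such as -l are make-specific. A standalone sketch of that filtering, with hypothetical PARALLEL_MAKE values:

    def filter_parallel_make(pm):
        # Closely mirrors the option handling in get_waf_parallel_make() above.
        pm = pm.split()
        while pm:
            opt = pm.pop(0)
            if opt == '-j' and pm:
                v = pm.pop(0)
            elif opt.startswith('-j'):
                v = opt[2:].strip()
            else:
                continue
            if v:
                return '-j' + str(min(64, int(v)))
        return ""

    assert filter_parallel_make("-j 16 -l 52") == "-j16"
    assert filter_parallel_make("-j8") == "-j8"
    assert filter_parallel_make("-l 4") == ""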
 
 waf_do_install() {