Squashed 'import-layers/meta-virtualization/' content from commit c4a1711
Change-Id: I42132e4f0aef12ec265e74d95f489a6409e22f46
git-subtree-dir: import-layers/meta-virtualization
git-subtree-split: c4a1711dd31659b027c70c07e4ef6da98591ac95
Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
diff --git a/import-layers/meta-virtualization/.gitignore b/import-layers/meta-virtualization/.gitignore
new file mode 100644
index 0000000..65feb82
--- /dev/null
+++ b/import-layers/meta-virtualization/.gitignore
@@ -0,0 +1,6 @@
+build*/
+pyshtables.py
+*.swp
+*.orig
+*.rej
+*~
diff --git a/import-layers/meta-virtualization/COPYING.MIT b/import-layers/meta-virtualization/COPYING.MIT
new file mode 100644
index 0000000..fb950dc
--- /dev/null
+++ b/import-layers/meta-virtualization/COPYING.MIT
@@ -0,0 +1,17 @@
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/import-layers/meta-virtualization/README b/import-layers/meta-virtualization/README
new file mode 100644
index 0000000..c60044d2
--- /dev/null
+++ b/import-layers/meta-virtualization/README
@@ -0,0 +1,53 @@
+meta-virtualization
+===================
+
+This layer provides support for building Xen, KVM, Libvirt, and associated
+packages necessary for constructing OE-based virtualized solutions.
+
+Dependencies
+------------
+This layer depends on:
+
+URI: git://github.com/openembedded/openembedded-core.git
+branch: master
+revision: HEAD
+prio: default
+
+URI: git://github.com/openembedded/meta-openembedded.git
+branch: master
+revision: HEAD
+layers: meta-oe
+ meta-networking
+ meta-filesystems
+ meta-python
+
+URI: git://github.com/errordeveloper/oe-meta-go.git
+branch: master
+revision: HEAD
+
+BBFILE_PRIORITY_openembedded-layer = "4"
+
+Required for Xen XSM policy:
+URI: git://git.yoctoproject.org/meta-selinux
+branch: master
+revision: HEAD
+prio: default
+
+Maintenance
+-----------
+
+Send pull requests, patches, comments or questions to meta-virtualization@yoctoproject.org
+
+Maintainers: Raymond Danks <ray.danks@se-eng.com>
+ Bruce Ashfield <bruce.ashfield@gmail.com>
+
+When sending single patches, please use something like:
+'git send-email -1 --to meta-virtualization@yoctoproject.org --subject-prefix=meta-virtualization][PATCH'
+
+License
+-------
+
+All metadata is MIT licensed unless otherwise stated. Source code included
+in tree for individual recipes is under the LICENSE stated in each recipe
+(.bb file) unless otherwise stated.
+
diff --git a/import-layers/meta-virtualization/classes/go-osarchmap.bbclass b/import-layers/meta-virtualization/classes/go-osarchmap.bbclass
new file mode 100644
index 0000000..4e00c7b
--- /dev/null
+++ b/import-layers/meta-virtualization/classes/go-osarchmap.bbclass
@@ -0,0 +1,38 @@
+BUILD_GOOS = "${@go_map_os(d.getVar('BUILD_OS', True), d)}"
+BUILD_GOARCH = "${@go_map_arch(d.getVar('BUILD_ARCH', True), d)}"
+BUILD_GOTUPLE = "${BUILD_GOOS}_${BUILD_GOARCH}"
+HOST_GOOS = "${@go_map_os(d.getVar('HOST_OS', True), d)}"
+HOST_GOARCH = "${@go_map_arch(d.getVar('HOST_ARCH', True), d)}"
+HOST_GOARM = "${@go_map_arm(d.getVar('HOST_ARCH', True), d.getVar('TUNE_FEATURES', True), d)}"
+HOST_GOTUPLE = "${HOST_GOOS}_${HOST_GOARCH}"
+TARGET_GOOS = "${@go_map_os(d.getVar('TARGET_OS', True), d)}"
+TARGET_GOARCH = "${@go_map_arch(d.getVar('TARGET_ARCH', True), d)}"
+TARGET_GOARM = "${@go_map_arm(d.getVar('TARGET_ARCH', True), d.getVar('TUNE_FEATURES', True), d)}"
+TARGET_GOTUPLE = "${TARGET_GOOS}_${TARGET_GOARCH}"
+GO_BUILD_BINDIR = "${@['bin/${HOST_GOTUPLE}','bin'][d.getVar('BUILD_GOTUPLE',True) == d.getVar('HOST_GOTUPLE',True)]}"
+
+def go_map_arch(a, d):
+ import re
+ if re.match('i.86', a):
+ return '386'
+ elif a == 'x86_64':
+ return 'amd64'
+ elif re.match('arm.*', a):
+ return 'arm'
+ elif re.match('aarch64.*', a):
+ return 'arm64'
+ elif re.match('p(pc|owerpc)(|64)', a):
+ return 'powerpc'
+ else:
+ bb.error("cannot map '%s' to a Go architecture" % a)
+
+def go_map_arm(a, f, d):
+ import re
+ if re.match('arm.*', a) and re.match('arm.*7.*', f):
+ return '7'
+ return ''
+
+def go_map_os(o, d):
+ if o.startswith('linux'):
+ return 'linux'
+ return o
diff --git a/import-layers/meta-virtualization/conf/distro/include/virt_security_flags.inc b/import-layers/meta-virtualization/conf/distro/include/virt_security_flags.inc
new file mode 100644
index 0000000..f4c288b
--- /dev/null
+++ b/import-layers/meta-virtualization/conf/distro/include/virt_security_flags.inc
@@ -0,0 +1,5 @@
+# Build errors with the pie options enabled
+SECURITY_CFLAGS_pn-libvirt = "${SECURITY_NO_PIE_CFLAGS}"
+
+# This has text reloc errors with the pie options enabled
+SECURITY_CFLAGS_pn-lxc = "${SECURITY_NO_PIE_CFLAGS}"
diff --git a/import-layers/meta-virtualization/conf/layer.conf b/import-layers/meta-virtualization/conf/layer.conf
new file mode 100644
index 0000000..f5c1b1e
--- /dev/null
+++ b/import-layers/meta-virtualization/conf/layer.conf
@@ -0,0 +1,21 @@
+# We have a conf and classes directory, append to BBPATH
+BBPATH .= ":${LAYERDIR}"
+
+# We have a recipes directory, add to BBFILES
+BBFILES += "${LAYERDIR}/recipes*/*/*.bb ${LAYERDIR}/recipes*/*/*.bbappend"
+
+BBFILE_COLLECTIONS += "virtualization-layer"
+BBFILE_PATTERN_virtualization-layer := "^${LAYERDIR}/"
+BBFILE_PRIORITY_virtualization-layer = "8"
+
+# Custom licenses used by some packages in this layer
+LICENSE_PATH += "${LAYERDIR}/files/custom-licenses"
+
+# This should only be incremented on significant changes that will
+# cause compatibility issues with other layers
+LAYERVERSION_virtualization-layer = "1"
+
+LAYERDEPENDS_virtualization-layer = "core networking-layer"
+
+# Override security flags
+require conf/distro/include/virt_security_flags.inc
diff --git a/import-layers/meta-virtualization/docs/00-INDEX b/import-layers/meta-virtualization/docs/00-INDEX
new file mode 100644
index 0000000..a2ffd99
--- /dev/null
+++ b/import-layers/meta-virtualization/docs/00-INDEX
@@ -0,0 +1,12 @@
+This is a brief list of all the files in meta-virtualization/docs and what
+they contain. If you add a documentation file, please list it here in
+alphabetical order as well.
+
+00-INDEX
+ - this file.
+
+00-README
+ - info on the goals of meta-virtualization and this docs subdir
+
+openvswitch.txt
+ - example on how to setup openvswitch with qemu/kvm.
diff --git a/import-layers/meta-virtualization/docs/00-README b/import-layers/meta-virtualization/docs/00-README
new file mode 100644
index 0000000..6fea112
--- /dev/null
+++ b/import-layers/meta-virtualization/docs/00-README
@@ -0,0 +1,6 @@
+meta-virtualization: docs
+=========================
+
+The docs subdirectory is a holding tank for meta-virtualization related
+READMEs, documentation, testing information, configuration and other
+notes that help the users of meta-virt.
diff --git a/import-layers/meta-virtualization/docs/openvswitch.txt b/import-layers/meta-virtualization/docs/openvswitch.txt
new file mode 100644
index 0000000..4410d27
--- /dev/null
+++ b/import-layers/meta-virtualization/docs/openvswitch.txt
@@ -0,0 +1,96 @@
+Simple setup for connecting openvswitch to qemu/kvm
+===================================================
+This example brings up openvswitch using a private network.
+
+Preliminary notes
+=================
+1. Make sure to build kernel support for openvswitch as a module. The
+openvswitch init scripts expect to load a module and upon success
+continue to setup the switch. If openvswitch is compiled
+statically, the init scripts not load the ovs-vswitchd daemon
+and none of the configured bridges will show up in the interfaces
+table (ifconfig). You can get around this limiation by running the
+following by hand:
+ # ovs-vswitchd --pidfile --detach
+
+2. Verify that ovs-vswitchd is running before proceeding:
+ # /etc/init.d/openvswitch-switch status
+ ovsdb-server is running with pid 1867
+ ovs-vswitchd is running with pid 1877
+
+3. A kernel and rootfs are required for qemu bring up.
+
+Qemu Setup
+==========
+The host requires a /etc/qemu-ifup script to setup the bridging and tap
+devices. Qemu will invoke this qemu-ifup script at startup. Here is
+an example script:
+$ cat /etc/qemu-ifup
+ #!/bin/sh
+ # the tap is dynamically assigned and passed into this script
+ # as a parameter
+ TAP=$1
+
+ # Note: if booting over NFS, once the $ETH0 device is added to the bridge,
+ # your host will be unusable. In that case, setup networking
+ # init scripts appropriately and change the following to work
+ # with it.
+ ETH0="eth1"
+ NETMASK=255.255.255.0
+ IP=192.168.1.1
+ GATEWAY=
+ SWITCH=ovsbr0
+ if [ -n "$TAP" ];then
+ ifconfig $TAP up
+ ifconfig $SWITCH down &>/dev/null
+ ovs-vsctl del-br $SWITCH
+ ovs-vsctl add-br $SWITCH
+ ifconfig $ETH0 0.0.0.0
+ ifconfig $SWITCH $IP up netmask $NETMASK
+ #-- external access not required for this test.
+ #route add default gw $GATEWAY
+ ovs-vsctl add-port $SWITCH $ETH0
+ ovs-vsctl add-port $SWITCH $TAP
+ exit 0
+ else
+ echo "$0: No tap device"
+ exit 1
+ fi
+
+Start Qemu
+==========
+This example will bring up qemu with a tap network interface.
+Note: this command must be run as root due to the networking setup.
+
+ $ qemu-system-x86_64 -nographic -k en-us -m 1024 \
+ -net nic,macaddr=1a:46:0b:ca:bc:7a,model=virtio \
+ -net tap -enable-kvm\
+ -kernel /opt/dpdk-guest-kernel \
+ -append 'root=/dev/vda ro console=ttyS0' \
+ -drive file=/opt/intel-xeon-core-ovp-kvm-preempt-rt-dist.ext3,cache=none,if=virtio
+
+Once the guest OS is up and running, configure the guest network interface:
+ $ ifconfig eth0 192.168.1.10
+
+Ping the bridge:
+ $ ping 192.168.1.1
+
+From the host, view the bridged network:
+$ ovs-vsctl show
+c1212b96-ef49-4a8e-b598-09b05b854dd0
+ Bridge "ovsbr0"
+ Port "tap0"
+ Interface "tap0"
+ Port "eth1"
+ Interface "eth1"
+ Port "ovsbr0"
+ Interface "ovsbr0"
+ type: internal
+
+At this point, openvswitch is up and running. If you want external
+network access, you need to set a GATEWAY in the qemu-ifup script and
+make sure the external device is part of the bridge.
+
+Note:
+Proper setup will require a /etc/qemu-ifdown script to tear down the
+bridge and interfaces. (not provided here).
diff --git a/import-layers/meta-virtualization/files/custom-licenses/Intel-ACPI b/import-layers/meta-virtualization/files/custom-licenses/Intel-ACPI
new file mode 100644
index 0000000..df0d0bb
--- /dev/null
+++ b/import-layers/meta-virtualization/files/custom-licenses/Intel-ACPI
@@ -0,0 +1,104 @@
+1. Copyright Notice
+
+Some or all of this work - Copyright (c) 1999 - 2010, Intel Corp.
+All rights reserved.
+
+2. License
+
+2.1. This is your license from Intel Corp. under its intellectual property
+rights. You may have additional license terms from the party that provided
+you this software, covering your right to use that party's intellectual
+property rights.
+
+2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a
+copy of the source code appearing in this file ("Covered Code") an
+irrevocable, perpetual, worldwide license under Intel's copyrights in the
+base code distributed originally by Intel ("Original Intel Code") to copy,
+make derivatives, distribute, use and display any portion of the Covered
+Code in any form, with the right to sublicense such rights; and
+
+2.3. Intel grants Licensee a non-exclusive and non-transferable patent
+license (with the right to sublicense), under only those claims of Intel
+patents that are infringed by the Original Intel Code, to make, use, sell,
+offer to sell, and import the Covered Code and derivative works thereof
+solely to the minimum extent necessary to exercise the above copyright
+license, and in no event shall the patent license extend to any additions
+to or modifications of the Original Intel Code. No other license or right
+is granted directly or by implication, estoppel or otherwise;
+
+The above copyright and patent license is granted only if the following
+conditions are met:
+
+3. Conditions
+
+3.1. Redistribution of Source with Rights to Further Distribute Source.
+Redistribution of source code of any substantial portion of the Covered
+Code or modification with rights to further distribute source must include
+the above Copyright Notice, the above License, this list of Conditions,
+and the following Disclaimer and Export Compliance provision. In addition,
+Licensee must cause all Covered Code to which Licensee contributes to
+contain a file documenting the changes Licensee made to create that Covered
+Code and the date of any change. Licensee must include in that file the
+documentation of any changes made by any predecessor Licensee. Licensee
+must include a prominent statement that the modification is derived,
+directly or indirectly, from Original Intel Code.
+
+3.2. Redistribution of Source with no Rights to Further Distribute Source.
+Redistribution of source code of any substantial portion of the Covered
+Code or modification without rights to further distribute source must
+include the following Disclaimer and Export Compliance provision in the
+documentation and/or other materials provided with distribution. In
+addition, Licensee may not authorize further sublicense of source of any
+portion of the Covered Code, and must include terms to the effect that the
+license from Licensee to its licensee is limited to the intellectual
+property embodied in the software Licensee provides to its licensee, and
+not to intellectual property embodied in modifications its licensee may
+make.
+
+3.3. Redistribution of Executable. Redistribution in executable form of any
+substantial portion of the Covered Code or modification must reproduce the
+above Copyright Notice, and the following Disclaimer and Export Compliance
+provision in the documentation and/or other materials provided with the
+distribution.
+
+3.4. Intel retains all right, title, and interest in and to the Original
+Intel Code.
+
+3.5. Neither the name Intel nor any other trademark owned or controlled by
+Intel shall be used in advertising or otherwise to promote the sale, use or
+other dealings in products derived from or relating to the Covered Code
+without prior written authorization from Intel.
+
+4. Disclaimer and Export Compliance
+
+4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED
+HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE
+IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE,
+INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY
+UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY
+IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A
+PARTICULAR PURPOSE.
+
+4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES
+OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR
+COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT,
+SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY
+CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL
+HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS
+SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY
+LIMITED REMEDY.
+
+4.3. Licensee shall not export, either directly or indirectly, any of this
+software or system incorporating such software without first obtaining any
+required license or other approval from the U. S. Department of Commerce or
+any other agency or department of the United States Government. In the
+event Licensee exports any such software from the United States or
+re-exports any such software from a foreign destination, Licensee shall
+ensure that the distribution and export/re-export of the software is in
+compliance with all laws, regulations, orders, or other restrictions of the
+U.S. Export Administration Regulations. Licensee agrees that neither it nor
+any of its subsidiaries will export/re-export any technical data, process,
+software, or service, directly or indirectly, to any country for which the
+United States government or any agency thereof requires an export license,
+other governmental approval, or letter of assurance, without first obtaining
+such license, approval or letter.
diff --git a/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb
new file mode 100644
index 0000000..3ca5238
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb
@@ -0,0 +1,22 @@
+SECTION = "devel"
+SUMMARY = "Light-weight package to set up cgroups at system boot."
+DESCRIPTION = "Light-weight package to set up cgroups at system boot."
+HOMEPAGE = "http://packages.ubuntu.com/source/precise/cgroup-lite"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://debian/copyright;md5=5d5da4e0867cf06014f87102154d0102"
+SRC_URI = "http://archive.ubuntu.com/ubuntu/pool/main/c/cgroup-lite/cgroup-lite_1.1.tar.gz"
+SRC_URI += "file://cgroups-init"
+SRC_URI[md5sum] = "041a0d8ad2b192271a2e5507fdb6809f"
+SRC_URI[sha256sum] = "e7f9992b90b5b4634f3b8fb42580ff28ff31093edb297ab872c37f61a94586bc"
+
+inherit update-rc.d
+
+INITSCRIPT_NAME = "cgroups-init"
+INITSCRIPT_PARAMS = "start 8 2 3 4 5 . stop 20 0 1 6 ."
+do_install() {
+ install -d ${D}/bin
+ install -d ${D}${sysconfdir}/init.d
+ install -m 0755 ${S}/scripts/cgroups-mount ${D}/bin
+ install -m 0755 ${S}/scripts/cgroups-umount ${D}/bin
+ install -m 0755 ${WORKDIR}/cgroups-init ${D}${sysconfdir}/init.d/cgroups-init
+}
diff --git a/import-layers/meta-virtualization/recipes-containers/cgroup-lite/files/cgroups-init b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/files/cgroups-init
new file mode 100755
index 0000000..e504024
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/files/cgroups-init
@@ -0,0 +1,27 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: cgroups mount
+# Required-Start: $network $remote_fs
+# Required-Stop: $network $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: mount/unmount cgroups
+### END INIT INFO
+
+# must start before libvirtd is run
+case "$1" in
+ start)
+ echo -n "Mounting cgroups..."
+ /bin/cgroups-mount
+ echo "Done"
+ ;;
+ stop)
+ echo -n "Unmounting cgroups..."
+ /bin/cgroups-umount
+ echo "Done"
+ ;;
+ *)
+ echo "Usage: /etc/init.d/cgroups-init {start|stop}"
+ exit 1
+ ;;
+esac
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
new file mode 100644
index 0000000..48bcdc2
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
@@ -0,0 +1,71 @@
+SUMMARY = "CRIU"
+DESCRIPTION = "Checkpoint/Restore In Userspace, or CRIU, is a software tool for \
+Linux operating system. Using this tool, you can freeze a running application \
+(or part of it) and checkpoint it to a hard drive as a collection of files. \
+You can then use the files to restore and run the application from the point \
+it was frozen at. The distinctive feature of the CRIU project is that it is \
+mainly implemented in user space"
+HOMEPAGE = "http://criu.org"
+SECTION = "console/tools"
+LICENSE = "GPLv2"
+
+EXCLUDE_FROM_WORLD = "1"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=5cc804625b8b491b6b4312f0c9cb5efa"
+
+SRCREV = "4c5b23e52c1dc4e3fbbc7472b92e7b1ce9d22f02"
+PR = "r0"
+PV = "1.6+git${SRCPV}"
+
+SRC_URI = "git://github.com/xemul/criu.git;protocol=git \
+ file://0001-criu-Fix-toolchain-hardcode.patch \
+ file://0002-criu-Skip-documentation-install.patch \
+ file://0001-criu-Change-libraries-install-directory.patch \
+ "
+
+COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
+
+DEPENDS += "protobuf-c-native protobuf-c"
+
+S = "${WORKDIR}/git"
+
+#
+# CRIU just can be built on ARMv7 and ARMv6, so the Makefile check
+# if the ARCH is ARMv7 or ARMv6.
+# ARM BSPs need set CRIU_BUILD_ARCH variable for building CRIU.
+#
+EXTRA_OEMAKE_arm += "ARCH=${CRIU_BUILD_ARCH} WERROR=0"
+EXTRA_OEMAKE_x86-64 += "ARCH=${TARGET_ARCH} WERROR=0"
+EXTRA_OEMAKE_aarch64 += "ARCH=${TARGET_ARCH} WERROR=0"
+
+EXTRA_OEMAKE_append += "SBINDIR=${sbindir} LIBDIR=${libdir} INCLUDEDIR=${includedir} PIEGEN=no"
+EXTRA_OEMAKE_append += "LOGROTATEDIR=${sysconfdir} SYSTEMDUNITDIR=${systemd_unitdir}"
+
+CFLAGS += "-D__USE_GNU -D_GNU_SOURCE"
+
+# overide LDFLAGS to allow criu to build without: "x86_64-poky-linux-ld: unrecognized option '-Wl,-O1'"
+export LDFLAGS=""
+
+export BUILD_SYS
+export HOST_SYS
+
+inherit setuptools
+
+do_compile_prepend() {
+ rm -rf ${S}/protobuf/google/protobuf/descriptor.proto
+ ln -s ${PKG_CONFIG_SYSROOT_DIR}/usr/include/google/protobuf/descriptor.proto ${S}/protobuf/google/protobuf/descriptor.proto
+}
+
+do_compile () {
+ oe_runmake
+}
+
+do_install () {
+ oe_runmake PREFIX=${exec_prefix} LIBDIR=${libdir} DESTDIR="${D}" install
+}
+
+FILES_${PN} += "${systemd_unitdir}/ \
+ ${libdir}/python2.7/site-packages/ \
+ ${libdir}/pycriu/ \
+ ${libdir}/crit-0.0.1-py2.7.egg-info \
+ "
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
new file mode 100644
index 0000000..28d638b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
@@ -0,0 +1,48 @@
+From cb9933dc34af0b4d52c4584332600114ac65c402 Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Tue, 4 Aug 2015 17:45:51 +0800
+Subject: [PATCH] criu: Change libraries install directory
+
+Install the libraries into /usr/lib(/usr/lib64)
+
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+---
+ Makefile | 2 +-
+ Makefile.inc | 9 ---------
+ 2 files changed, 1 insertion(+), 10 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 7f5c890..6dbc436 100644
+--- a/Makefile
++++ b/Makefile
+@@ -351,7 +351,7 @@ install-man:
+
+ install-crit: crit
+ $(E) " INSTALL crit"
+- $(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX)
++ $(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX) --install-lib=$(LIBDIR)
+
+ .PHONY: install install-man install-crit install-criu
+
+diff --git a/Makefile.inc b/Makefile.inc
+index 5496f41..ba70aea 100644
+--- a/Makefile.inc
++++ b/Makefile.inc
+@@ -17,14 +17,5 @@ MANDIR := $(PREFIX)/share/man
+ SYSTEMDUNITDIR := $(PREFIX)/lib/systemd/system/
+ LOGROTATEDIR := $(PREFIX)/etc/logrotate.d/
+ LIBDIR := $(PREFIX)/lib
+-# For recent Debian/Ubuntu with multiarch support
+-DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture \
+- -qDEB_HOST_MULTIARCH 2>/dev/null)
+-ifneq "$(DEB_HOST_MULTIARCH)" ""
+-LIBDIR := $(PREFIX)/lib/$(DEB_HOST_MULTIARCH)
+-# For most other systems
+-else ifeq "$(shell uname -m)" "x86_64"
+-LIBDIR := $(PREFIX)/lib64
+-endif
+
+ INCLUDEDIR := $(PREFIX)/include/criu
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
new file mode 100644
index 0000000..2fabe0a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
@@ -0,0 +1,46 @@
+From 3d4f112fdb434712eba09239a468842323f1af4c Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@windriver.com>
+Date: Tue, 26 Aug 2014 14:42:42 -0700
+Subject: [PATCH 1/2] criu: Fix toolchain hardcode
+
+Replace ":=" to "?=" so that the toolchain used by bitbake build system will
+be taken.
+
+Signed-off-by: Yang Shi <yang.shi@windriver.com>
+Signed-off-by: Nam Ninh <nam.ninh@windriver.com>
+---
+ Makefile | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index f1c8784..43252ec 100644
+--- a/Makefile
++++ b/Makefile
+@@ -23,15 +23,15 @@ export VERSION_SO_MAJOR VERSION_SO_MINOR
+ # Common definitions
+ #
+
+-FIND := find
+-CSCOPE := cscope
+-RM := rm -f
+-LD := $(CROSS_COMPILE)ld
+-CC := $(CROSS_COMPILE)gcc
+-NM := $(CROSS_COMPILE)nm
+-SH := bash
+-MAKE := make
+-OBJCOPY := $(CROSS_COMPILE)objcopy
++FIND ?= find
++CSCOPE ?= cscope
++RM ?= rm -f
++LD ?= $(CROSS_COMPILE)ld
++CC ?= $(CROSS_COMPILE)gcc
++NM ?= $(CROSS_COMPILE)nm
++SH ?= bash
++MAKE ?= make
++OBJCOPY ?= $(CROSS_COMPILE)objcopy
+
+ CFLAGS += $(USERCFLAGS)
+
+--
+2.0.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch
new file mode 100644
index 0000000..b6fbf01
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch
@@ -0,0 +1,46 @@
+From 81bc5928cdc1b432656eb6590967306d8cf3ac9d Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Tue, 4 Aug 2015 10:22:21 +0800
+Subject: [PATCH] protobuf-c: Remove the rules which depend on the native
+ command
+
+Those rules are not for cross-compile since the command protoc-c/cxx-generate-packed-data
+need be executed to generate some local files in the compiling processing.
+
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+---
+ Makefile.am | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 310aa09..0602e96 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -148,17 +148,18 @@ t_generated_code2_cxx_generate_packed_data_CXXFLAGS = \
+ t_generated_code2_cxx_generate_packed_data_LDADD = \
+ $(protobuf_LIBS)
+
+-t/test.pb-c.c t/test.pb-c.h: $(top_builddir)/protoc-c/protoc-c$(EXEEXT) $(top_srcdir)/t/test.proto
+- $(AM_V_GEN)$(top_builddir)/protoc-c/protoc-c$(EXEEXT) -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test.proto
++t/test.pb-c.c t/test.pb-c.h: $(top_srcdir)/t/test.proto
++ $(AM_V_GEN)protoc-c -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test.proto
+
+-t/test-full.pb-c.c t/test-full.pb-c.h: $(top_builddir)/protoc-c/protoc-c$(EXEEXT) $(top_srcdir)/t/test-full.proto
+- $(AM_V_GEN)$(top_builddir)/protoc-c/protoc-c$(EXEEXT) -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
++t/test-full.pb-c.c t/test-full.pb-c.h: $(top_srcdir)/t/test-full.proto
++ $(AM_V_GEN)protoc-c -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
+
+ t/test-full.pb.cc t/test-full.pb.h: @PROTOC@ $(top_srcdir)/t/test-full.proto
+ $(AM_V_GEN)@PROTOC@ -I$(top_srcdir) --cpp_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
+
+-t/generated-code2/test-full-cxx-output.inc: t/generated-code2/cxx-generate-packed-data$(EXEEXT)
+- $(AM_V_GEN)$(top_builddir)/t/generated-code2/cxx-generate-packed-data$(EXEEXT) > $(top_builddir)/t/generated-code2/test-full-cxx-output.inc
++t/generated-code2/test-full-cxx-output.inc:
++ mkdir -p $(top_builddir)/t/generated-code2
++ $(AM_V_GEN)cxx-generate-packed-data > $(top_builddir)/t/generated-code2/test-full-cxx-output.inc
+
+ BUILT_SOURCES += \
+ t/test.pb-c.c t/test.pb-c.h \
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
new file mode 100644
index 0000000..eaf8160
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
@@ -0,0 +1,29 @@
+From e9c2a94b9eb37ad24672b10caa398bd18282b962 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@windriver.com>
+Date: Tue, 26 Aug 2014 14:44:51 -0700
+Subject: [PATCH 2/2] criu: Skip documentation install
+
+asciidoc is needed to generate CRIU documentation, so skip it in install.
+
+Signed-off-by: Yang Shi <yang.shi@windriver.com>
+Signed-off-by: Nam Ninh <nam.ninh@windriver.com>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 43252ec..e25edcc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -265,7 +265,7 @@ install: $(PROGRAM) install-man
+ $(Q) install -m 644 scripts/logrotate.d/criu-service $(DESTDIR)$(LOGROTATEDIR)
+
+ install-man:
+- $(Q) $(MAKE) -C Documentation install
++# $(Q) $(MAKE) -C Documentation install
+
+ .PHONY: install install-man
+
+--
+2.0.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch
new file mode 100644
index 0000000..ef60fc0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch
@@ -0,0 +1,25 @@
+From f8b7c90f6da90b67bdd7d5301894c5c28bd9d076 Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Mon, 10 Aug 2015 11:23:31 +0800
+Subject: [PATCH] Omit google-apputils dependency
+
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+---
+ python/setup.py | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/python/setup.py b/python/setup.py
+index 2450a77..6f6bffb 100755
+--- a/python/setup.py
++++ b/python/setup.py
+@@ -189,7 +189,6 @@ if __name__ == '__main__':
+ 'google.protobuf.text_format'],
+ cmdclass = { 'clean': clean, 'build_py': build_py },
+ install_requires = ['setuptools'],
+- setup_requires = ['google-apputils'],
+ ext_modules = ext_module_list,
+ url = 'https://developers.google.com/protocol-buffers/',
+ maintainer = maintainer_email,
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch
new file mode 100644
index 0000000..dac8942
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch
@@ -0,0 +1,19 @@
+diff -Naur protobuf-c-0.15.old/src/Makefile.am protobuf-c-0.15/src/Makefile.am
+--- protobuf-c-0.15.old/src/Makefile.am 2012-11-28 14:59:57.845251943 +0100
++++ protobuf-c-0.15/src/Makefile.am 2012-11-28 15:00:23.549252632 +0100
+@@ -1,5 +1,5 @@
+ if BUILD_PROTOC_C
+-SUBDIRS = . test
++
+ bin_PROGRAMS = protoc-c
+ protoc_c_SOURCES = \
+ google/protobuf/compiler/c/c_service.cc \
+@@ -23,7 +23,7 @@
+ lib_LTLIBRARIES = libprotobuf-c.la
+ protobufcincludedir = $(includedir)/google/protobuf-c
+
+-EXTRA_DIST = CMakeLists.txt test/CMakeLists.txt
++EXTRA_DIST = CMakeLists.txt
+
+ libprotobuf_c_la_SOURCES = \
+ google/protobuf-c/protobuf-c-dispatch.c \
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch
new file mode 100644
index 0000000..13d4e84
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch
@@ -0,0 +1,38 @@
+From 46e331263eb92e47510e88478b255f226d30245c Mon Sep 17 00:00:00 2001
+From: Keith Holman <Keith.Holman@windriver.com>
+Date: Mon, 18 Aug 2014 15:19:35 -0400
+Subject: [PATCH] protobuf: allow running python scripts from anywhere
+
+The Makefile to generate the examples with Google Protocol Buffers
+generates some scripts for python. However, these generated scripts
+only work if they are ran in the same directory as the source files.
+This fix generates scripts to execute from anywhere on the system.
+
+Signed-off-by: Keith Holman <Keith.Holman@windriver.com>
+---
+ examples/Makefile | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/examples/Makefile b/examples/Makefile
+index 8dc9083..a993d63 100644
+--- a/examples/Makefile
++++ b/examples/Makefile
+@@ -48,11 +48,13 @@ list_people_java: javac_middleman
+ add_person_python: add_person.py protoc_middleman
+ @echo "Writing shortcut script add_person_python..."
+ @echo '#! /bin/sh' > add_person_python
+- @echo './add_person.py "$$@"' >> add_person_python
++ @echo 'SCRIPT_DIR=$$(dirname $$0)' >> add_person_python
++ @echo '$$SCRIPT_DIR/add_person.py "$$@"' >> add_person_python
+ @chmod +x add_person_python
+
+ list_people_python: list_people.py protoc_middleman
+ @echo "Writing shortcut script list_people_python..."
+ @echo '#! /bin/sh' > list_people_python
+- @echo './list_people.py "$$@"' >> list_people_python
++ @echo 'SCRIPT_DIR=$$(dirname $$0)' >> list_people_python
++ @echo '$$SCRIPT_DIR/list_people.py "$$@"' >> list_people_python
+ @chmod +x list_people_python
+--
+1.9.3
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest b/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest
new file mode 100755
index 0000000..a5a7b0f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest
@@ -0,0 +1,32 @@
+#!/bin/bash
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TEST_FILE="/tmp/test.data"
+
+RETVAL=0
+# Test every writing test application
+for write_exe_full_path in ${DIR}/add_person_*; do
+ if [ -x "${write_exe_full_path}" ]; then
+ write_exe=`basename ${write_exe_full_path}`
+ echo "Generating new test file using ${write_exe}..."
+ ${write_exe_full_path} "${TEST_FILE}"
+ RETVAL=$?
+
+ # Test every reading test application
+ for read_exe_full_path in ${DIR}/list_people_*; do
+ read_exe=`basename ${read_exe_full_path}`
+ echo "Test: Write with ${write_exe}; Read with ${read_exe}..."
+ if [ -x "${read_exe_full_path}" ]; then
+ ${read_exe_full_path} "${TEST_FILE}"
+ RETVAL=$?
+ fi
+ done
+
+ # Cleanup...
+ if [ -e "${TEST_FILE}" ]; then
+ rm "${TEST_FILE}"
+ fi
+ fi
+done
+
+exit $RETVAL
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb
new file mode 100644
index 0000000..0d03ebe
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb
@@ -0,0 +1,28 @@
+SUMMARY = "protobuf-c"
+DESCRIPTION = "This package provides a code generator and runtime libraries to use Protocol Buffers from pure C"
+HOMEPAGE = "http://code.google.com/p/protobuf-c/"
+SECTION = "console/tools"
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://protobuf-c/protobuf-c.c;endline=28;md5=0feb44cc63eacef97219b0174967492f"
+
+COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
+
+DEPENDS = "protobuf protobuf-c-native"
+
+SRC_URI[md5sum] = "41d437677ea16f9d3611d98841c4af3b"
+SRC_URI[sha256sum] = "09c5bb187b7a8e86bc0ff860f7df86370be9e8661cdb99c1072dcdab0763562c"
+SRC_URI = "https://github.com/protobuf-c/protobuf-c/releases/download/v1.1.1/protobuf-c-1.1.1.tar.gz "
+SRC_URI_append_class-target ="file://0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch"
+
+inherit autotools pkgconfig
+
+BBCLASSEXTEND = "native nativesdk"
+
+do_configure_prepend_class-target() {
+ export PKG_CONFIG_PATH="${STAGING_LIBDIR_NATIVE}/pkgconfig:${PKG_CONFIG_PATH}"
+}
+
+do_install_append_class-native() {
+ install -m 755 ${B}/t/generated-code2/cxx-generate-packed-data ${D}/${bindir}
+}
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb
new file mode 100644
index 0000000..e88c9e7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb
@@ -0,0 +1,21 @@
+SUMMARY = "protobuf"
+DESCRIPTION = "Protocol Buffers are a way of encoding structured data in \
+an efficient yet extensible format. Google uses Protocol Buffers for \
+almost all of its internal RPC protocols and file formats."
+HOMEPAGE = "http://code.google.com/p/protobuf/"
+SECTION = "console/tools"
+LICENSE = "BSD-3-Clause"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=af6809583bfde9a31595a58bb4a24514"
+
+PR = "r0"
+
+SRC_URI[md5sum] = "af05b2cca289f7b86eef2734a0cdc8b9"
+SRC_URI[sha256sum] = "2667b7cda4a6bc8a09e5463adf3b5984e08d94e72338277affa8594d8b6e5cd1"
+SRC_URI = "https://github.com/google/protobuf/archive/v2.6.1.tar.gz;downloadfilename=protobuf-2.6.1.tar.gz \
+ "
+
+EXTRA_OECONF += " --with-protoc=echo --disable-shared"
+
+inherit native autotools
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb b/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb
new file mode 100644
index 0000000..1b7ab20
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb
@@ -0,0 +1,97 @@
+SUMMARY = "protobuf"
+DESCRIPTION = "Protocol Buffers are a way of encoding structured data in \
+an efficient yet extensible format. Google uses Protocol Buffers for \
+almost all of its internal RPC protocols and file formats."
+HOMEPAGE = "http://code.google.com/p/protobuf/"
+SECTION = "console/tools"
+LICENSE = "BSD-3-Clause"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=af6809583bfde9a31595a58bb4a24514"
+
+PR = "r0"
+EXCLUDE_FROM_WORLD = "1"
+
+SRC_URI[md5sum] = "af05b2cca289f7b86eef2734a0cdc8b9"
+SRC_URI[sha256sum] = "2667b7cda4a6bc8a09e5463adf3b5984e08d94e72338277affa8594d8b6e5cd1"
+SRC_URI = "https://github.com/google/protobuf/archive/v2.6.1.tar.gz;downloadfilename=protobuf-2.6.1.tar.gz\
+ file://protobuf-allow-running-python-scripts-from-anywhere.patch \
+ file://Omit-google-apputils-dependency.patch \
+ file://run-ptest"
+
+COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
+
+EXTRA_OECONF += " --with-protoc=${STAGING_BINDIR_NATIVE}/protoc"
+inherit autotools setuptools ptest
+
+DEPENDS += "protobuf-native"
+
+PYTHON_SRC_DIR="python"
+TEST_SRC_DIR="examples"
+LANG_SUPPORT="cpp python"
+
+do_compile() {
+ # Compile protoc compiler
+ base_do_compile
+}
+
+do_compile_ptest() {
+ # Modify makefile to use the cross-compiler
+ sed -e "s|c++|${CXX}|g" -i "${S}/${TEST_SRC_DIR}/Makefile"
+
+ mkdir -p "${B}/${TEST_SRC_DIR}"
+
+ # Add the location of the cross-compiled header and library files
+ # which haven't been installed yet.
+ cp "${B}/protobuf.pc" "${B}/${TEST_SRC_DIR}/protobuf.pc"
+ sed -e 's|libdir=|libdir=${PKG_CONFIG_SYSROOT_DIR}|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
+ sed -e 's|Cflags:|Cflags: -I${S}/src|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
+ sed -e 's|Libs:|Libs: -L${B}/src/.libs|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
+ export PKG_CONFIG_PATH="${B}/${TEST_SRC_DIR}"
+
+ # Save the pkgcfg sysroot variable, and update it to nothing so
+ # that it doesn't append the sysroot to the beginning of paths.
+ # The header and library files aren't installed to the target
+ # system yet. So the absolute paths were specified above.
+ save_pkg_config_sysroot_dir=$PKG_CONFIG_SYSROOT_DIR
+ export PKG_CONFIG_SYSROOT_DIR=
+
+ # Compile the tests
+ for lang in ${LANG_SUPPORT}; do
+ oe_runmake -C "${S}/${TEST_SRC_DIR}" ${lang}
+ done
+
+ # Restore the pkgconfig sysroot variable
+ export PKG_CONFIG_SYSROOT_DIR=$save_pkg_config_sysroot_dir
+}
+
+do_install() {
+ local olddir=`pwd`
+
+ # Install protoc compiler
+ autotools_do_install
+
+ # Install header files
+ export PROTOC="${STAGING_BINDIR_NATIVE}/protoc"
+ cd "${S}/${PYTHON_SRC_DIR}"
+ distutils_do_install
+
+ cd "$olddir"
+}
+
+do_install_ptest() {
+ local olddir=`pwd`
+
+ cd "${S}/${TEST_SRC_DIR}"
+ install -d "${D}/${PTEST_PATH}"
+ for i in add_person* list_people*; do
+ if [ -x "$i" ]; then
+ install "$i" "${D}/${PTEST_PATH}"
+ fi
+ done
+ cp "${S}/${TEST_SRC_DIR}/addressbook_pb2.py" "${D}/${PTEST_PATH}"
+
+ cd "$olddir"
+}
+
+BBCLASSEXTEND = "nativesdk"
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb b/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
new file mode 100644
index 0000000..0320440
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
@@ -0,0 +1,93 @@
+HOMEPAGE = "https://github.com/docker/docker-registry"
+SUMMARY = "Registry server for Docker"
+DESCRIPTION = "\
+ This is the classic python docker-registry. \
+ . \
+ hosting/delivering of repositories and images \
+ "
+
+SRCREV = "fd8c0c114985547b69088e0f1526e58bfe2ff914"
+SRC_URI = "\
+ git://github.com/docker/docker-registry.git \
+ file://docker-registry.conf \
+ file://docker-registry.service \
+ file://config.yml \
+ file://change_sqlalchemy_rqt.patch \
+ "
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=35e8e5305c1b7b4a5761f9de5d44e5f4"
+
+S = "${WORKDIR}/git"
+
+PV = "0.9.1+git${SRCREV}"
+
+RDEPENDS_${PN} += "\
+ docker \
+ gunicorn (>= 19.1.1) \
+ python-pip \
+ python-distribute \
+ python-m2crypto (>= 0.22.3) \
+ python-pyyaml (>= 3.11) \
+ python-flask (>= 0.10.1) \
+ python-gevent (>= 1.0.1) \
+ python-requests \
+ python-sqlalchemy (>= 0.9.4) \
+ python-blinker (>= 1.3) \
+ python-backports-lzma (>= 0.0.3) \
+ python-flask-cors (>= 1.10.3) \
+ python-bugsnag (>= 2.0.2) \
+ python-docker-registry-core (>= 2.0.3) \
+ python-newrelic (>= 2.22.0.19) \
+ python-itsdangerous (>= 0.21) \
+ python-jinja2 (>= 2.4) \
+ python-werkzeug (>= 0.7) \
+ python-simplejson (>= 3.6.2) \
+ python-redis (>= 2.10.3) \
+ python-boto (>= 2.34.0) \
+ python-webob \
+ "
+# OFFICIAL REQ:
+# docker-registry-core>=2,<3
+# blinker==1.3
+# backports.lzma==0.0.3,!=0.0.4
+
+# Flask==0.10.1
+# gevent==1.0.1
+# gunicorn==19.1.1
+# PyYAML==3.11
+# requests==2.3.0
+# M2Crypto==0.22.3
+# sqlalchemy==0.9.4
+# setuptools==5.8
+#
+# [bugsnag]
+# bugsnag>=2.0,<2.1
+#
+# [cors]
+# Flask-cors>=1.8,<2.0
+#
+# [newrelic]
+# newrelic>=2.22,<2.23
+
+
+inherit setuptools systemd
+
+SYSTEMD_PACKAGES = "${@base_contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@base_contains('DISTRO_FEATURES','systemd','docker-registry.service','',d)}"
+
+do_install_append() {
+ mkdir -p ${D}/etc/default/
+ cp ${WORKDIR}/docker-registry.conf ${D}/etc/default/docker-registry
+
+ if ${@base_contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+ install -d ${D}${systemd_unitdir}/system
+ install -m 644 ${WORKDIR}/docker-registry.service ${D}/${systemd_unitdir}/system
+ sed -i "s|#WORKDIR#|${PYTHON_SITEPACKAGES_DIR}/docker_registry|" ${D}/${systemd_unitdir}/system/docker-registry.service
+ fi
+ # based on config_mirror.yml - uses /var/docker-registry instead of /tmp for files
+ install ${WORKDIR}/config.yml ${D}/etc/docker-registry.yml
+ mkdir -p ${D}/var/docker-registry
+}
+
+FILES_${PN} += "/etc/default /var/docker-registry /etc/ /etc/default/volatiles"
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/change_sqlalchemy_rqt.patch b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/change_sqlalchemy_rqt.patch
new file mode 100644
index 0000000..75cbd6d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/change_sqlalchemy_rqt.patch
@@ -0,0 +1,13 @@
+---
+ requirements/main.txt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/requirements/main.txt
++++ b/requirements/main.txt
+@@ -5,5 +5,5 @@
+ PyYAML==3.11
+ requests==2.3.0
+ M2Crypto==0.22.3
+-sqlalchemy==0.9.4
++sqlalchemy>=0.9.4
+ setuptools==5.8
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/config.yml b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/config.yml
new file mode 100644
index 0000000..8b33766
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/config.yml
@@ -0,0 +1,228 @@
+# All other flavors inherit the `common' config snippet
+common: &common
+ issue: '"docker-registry server"'
+ # Default log level is info
+ loglevel: _env:LOGLEVEL:info
+ # Enable debugging (additional informations in the output of the _ping endpoint)
+ debug: _env:DEBUG:false
+ # By default, the registry acts standalone (eg: doesn't query the index)
+ standalone: _env:STANDALONE:true
+ # The default endpoint to use (if NOT standalone) is index.docker.io
+ index_endpoint: _env:INDEX_ENDPOINT:https://index.docker.io
+ # Storage redirect is disabled
+ storage_redirect: _env:STORAGE_REDIRECT
+ # Token auth is enabled (if NOT standalone)
+ disable_token_auth: _env:DISABLE_TOKEN_AUTH
+ # No priv key
+ privileged_key: _env:PRIVILEGED_KEY
+ # No search backend
+ search_backend: _env:SEARCH_BACKEND
+ # SQLite search backend
+ sqlalchemy_index_database: _env:SQLALCHEMY_INDEX_DATABASE:sqlite:////var/docker-registry/docker-registry.db
+
+ # Mirroring is not enabled
+ mirroring:
+ source: _env:MIRROR_SOURCE # https://registry-1.docker.io
+ source_index: _env:MIRROR_SOURCE_INDEX # https://index.docker.io
+ tags_cache_ttl: _env:MIRROR_TAGS_CACHE_TTL:172800 # seconds
+
+ cache:
+ host: _env:CACHE_REDIS_HOST
+ port: _env:CACHE_REDIS_PORT
+ db: _env:CACHE_REDIS_DB:0
+ password: _env:CACHE_REDIS_PASSWORD
+
+ # Enabling LRU cache for small files
+ # This speeds up read/write on small files
+ # when using a remote storage backend (like S3).
+ cache_lru:
+ host: _env:CACHE_LRU_REDIS_HOST
+ port: _env:CACHE_LRU_REDIS_PORT
+ db: _env:CACHE_LRU_REDIS_DB:0
+ password: _env:CACHE_LRU_REDIS_PASSWORD
+
+ # Enabling these options makes the Registry send an email on each code Exception
+ email_exceptions:
+ smtp_host: _env:SMTP_HOST
+ smtp_port: _env:SMTP_PORT:25
+ smtp_login: _env:SMTP_LOGIN
+ smtp_password: _env:SMTP_PASSWORD
+ smtp_secure: _env:SMTP_SECURE:false
+ from_addr: _env:SMTP_FROM_ADDR:docker-registry@localdomain.local
+ to_addr: _env:SMTP_TO_ADDR:noise+dockerregistry@localdomain.local
+
+ # Enable bugsnag (set the API key)
+ bugsnag: _env:BUGSNAG
+
+ # CORS support is not enabled by default
+ cors:
+ origins: _env:CORS_ORIGINS
+ methods: _env:CORS_METHODS
+ headers: _env:CORS_HEADERS:[Content-Type]
+ expose_headers: _env:CORS_EXPOSE_HEADERS
+ supports_credentials: _env:CORS_SUPPORTS_CREDENTIALS
+ max_age: _env:CORS_MAX_AGE
+ send_wildcard: _env:CORS_SEND_WILDCARD
+ always_send: _env:CORS_ALWAYS_SEND
+ automatic_options: _env:CORS_AUTOMATIC_OPTIONS
+ vary_header: _env:CORS_VARY_HEADER
+ resources: _env:CORS_RESOURCES
+
+local: &local
+ <<: *common
+ storage: local
+ storage_path: _env:STORAGE_PATH:/var/docker-registry
+
+
+s3: &s3
+ <<: *common
+ storage: s3
+ s3_region: _env:AWS_REGION
+ s3_bucket: _env:AWS_BUCKET
+ boto_bucket: _env:AWS_BUCKET
+ storage_path: _env:STORAGE_PATH:/registry
+ s3_encrypt: _env:AWS_ENCRYPT:true
+ s3_secure: _env:AWS_SECURE:true
+ s3_access_key: _env:AWS_KEY
+ s3_secret_key: _env:AWS_SECRET
+ s3_use_sigv4: _env:AWS_USE_SIGV4
+ boto_host: _env:AWS_HOST
+ boto_port: _env:AWS_PORT
+ boto_calling_format: _env:AWS_CALLING_FORMAT
+
+cloudfronts3: &cloudfronts3
+ <<: *s3
+ cloudfront:
+ base: _env:CF_BASE_URL
+ keyid: _env:CF_KEYID
+ keysecret: _env:CF_KEYSECRET
+
+azureblob: &azureblob
+ <<: *common
+ storage: azureblob
+ azure_storage_account_name: _env:AZURE_STORAGE_ACCOUNT_NAME
+ azure_storage_account_key: _env:AZURE_STORAGE_ACCOUNT_KEY
+ azure_storage_container: _env:AZURE_STORAGE_CONTAINER:registry
+ azure_use_https: _env:AZURE_USE_HTTPS:true
+
+# Ceph Object Gateway Configuration
+# See http://ceph.com/docs/master/radosgw/ for details on installing this service.
+ceph-s3: &ceph-s3
+ <<: *common
+ storage: s3
+ s3_region: ~
+ s3_bucket: _env:AWS_BUCKET
+ s3_encrypt: _env:AWS_ENCRYPT:false
+ s3_secure: _env:AWS_SECURE:false
+ storage_path: _env:STORAGE_PATH:/registry
+ s3_access_key: _env:AWS_KEY
+ s3_secret_key: _env:AWS_SECRET
+ boto_bucket: _env:AWS_BUCKET
+ boto_host: _env:AWS_HOST
+ boto_port: _env:AWS_PORT
+ boto_debug: _env:AWS_DEBUG:0
+ boto_calling_format: _env:AWS_CALLING_FORMAT
+
+# Google Cloud Storage Configuration
+# See:
+# https://developers.google.com/storage/docs/reference/v1/getting-startedv1#keys
+# for details on access and secret keys.
+gcs:
+ <<: *common
+ storage: gcs
+ boto_bucket: _env:GCS_BUCKET
+ storage_path: _env:STORAGE_PATH:/registry
+ gs_secure: _env:GCS_SECURE:true
+ gs_access_key: _env:GCS_KEY
+ gs_secret_key: _env:GCS_SECRET
+ # OAuth 2.0 authentication with the storage.
+ # oauth2 can be set to true or false. If it is set to true, gs_access_key,
+ # gs_secret_key and gs_secure are not needed.
+ # Client ID and Client Secret must be set into OAUTH2_CLIENT_ID and
+ # OAUTH2_CLIENT_SECRET environment variables.
+ # See: https://developers.google.com/accounts/docs/OAuth2.
+ oauth2: _env:GCS_OAUTH2:false
+
+# This flavor is for storing images in Openstack Swift
+swift: &swift
+ <<: *common
+ storage: swift
+ storage_path: _env:STORAGE_PATH:/registry
+ # keystone authorization
+ swift_authurl: _env:OS_AUTH_URL
+ swift_container: _env:OS_CONTAINER
+ swift_user: _env:OS_USERNAME
+ swift_password: _env:OS_PASSWORD
+ swift_tenant_name: _env:OS_TENANT_NAME
+ swift_region_name: _env:OS_REGION_NAME
+
+# This flavor stores the images in Glance (to integrate with openstack)
+# See also: https://github.com/docker/openstack-docker
+glance: &glance
+ <<: *common
+ storage: glance
+ storage_alternate: _env:GLANCE_STORAGE_ALTERNATE:file
+ storage_path: _env:STORAGE_PATH:/var/docker-registry
+
+openstack:
+ <<: *glance
+
+# This flavor stores the images in Glance (to integrate with openstack)
+# and tags in Swift.
+glance-swift: &glance-swift
+ <<: *swift
+ storage: glance
+ storage_alternate: swift
+
+openstack-swift:
+ <<: *glance-swift
+
+elliptics:
+ <<: *common
+ storage: elliptics
+ elliptics_nodes: _env:ELLIPTICS_NODES
+ elliptics_wait_timeout: _env:ELLIPTICS_WAIT_TIMEOUT:60
+ elliptics_check_timeout: _env:ELLIPTICS_CHECK_TIMEOUT:60
+ elliptics_io_thread_num: _env:ELLIPTICS_IO_THREAD_NUM:2
+ elliptics_net_thread_num: _env:ELLIPTICS_NET_THREAD_NUM:2
+ elliptics_nonblocking_io_thread_num: _env:ELLIPTICS_NONBLOCKING_IO_THREAD_NUM:2
+ elliptics_groups: _env:ELLIPTICS_GROUPS
+ elliptics_verbosity: _env:ELLIPTICS_VERBOSITY:4
+ elliptics_logfile: _env:ELLIPTICS_LOGFILE:/dev/stderr
+ elliptics_addr_family: _env:ELLIPTICS_ADDR_FAMILY:2
+
+# This flavor stores the images in Aliyun OSS
+# See:
+# https://i.aliyun.com/access_key/
+# for details on access and secret keys.
+oss: &oss
+ <<: *common
+ storage: oss
+ storage_path: _env:STORAGE_PATH:/registry/
+ oss_host: _env:OSS_HOST
+ oss_bucket: _env:OSS_BUCKET
+ oss_accessid: _env:OSS_KEY
+ oss_accesskey: _env:OSS_SECRET
+
+
+
+# This is the default configuration when no flavor is specified
+dev: &dev
+ <<: *local
+ loglevel: _env:LOGLEVEL:debug
+ debug: _env:DEBUG:true
+ search_backend: _env:SEARCH_BACKEND:sqlalchemy
+
+# This flavor is used by unit tests
+test:
+ <<: *dev
+ index_endpoint: https://registry-stage.hub.docker.com
+ standalone: true
+ storage_path: _env:STORAGE_PATH:./tmp/test
+
+# To specify another flavor, set the environment variable SETTINGS_FLAVOR
+# $ export SETTINGS_FLAVOR=prod
+prod:
+ <<: *s3
+ storage_path: _env:STORAGE_PATH:/prod
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.conf b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.conf
new file mode 100644
index 0000000..940ece1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.conf
@@ -0,0 +1,19 @@
+# The Docker registry configuration file
+DOCKER_REGISTRY_CONFIG=/etc/docker-registry.yml
+
+# The configuration to use from DOCKER_REGISTRY_CONFIG file
+SETTINGS_FLAVOR=local
+
+# Address to bind the registry to
+REGISTRY_ADDRESS=0.0.0.0
+
+# Port to bind the registry to
+REGISTRY_PORT=5000
+
+# Number of workers to handle the connections
+GUNICORN_WORKERS=4
+
+STANDALONE=true
+
+MIRROR_SOURCE=https://registry-1.docker.io
+MIRROR_SOURCE_INDEX=https://index.docker.io
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.service b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.service
new file mode 100644
index 0000000..4f4cfe7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Registry server for Docker
+After=docker.service
+Requires=docker.service
+
+[Service]
+Type=simple
+Environment=DOCKER_REGISTRY_CONFIG=/etc/docker-registry.yml
+EnvironmentFile=-/etc/default/docker-registry
+WorkingDirectory=#WORKDIR#
+ExecStart=/usr/bin/gunicorn --access-logfile /var/log/docker-registry-access.log --error-logfile /var/log/docker-registry-error.log --debug --max-requests 100 --graceful-timeout 3600 -t 3600 -k gevent -b ${REGISTRY_ADDRESS}:${REGISTRY_PORT} -w ${GUNICORN_WORKERS} docker_registry.wsgi:application
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
new file mode 100644
index 0000000..42a336e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
@@ -0,0 +1,147 @@
+HOMEPAGE = "http://www.docker.com"
+SUMMARY = "Linux container runtime"
+DESCRIPTION = "Linux container runtime \
+ Docker complements kernel namespacing with a high-level API which \
+ operates at the process level. It runs unix processes with strong \
+ guarantees of isolation and repeatability across servers. \
+ . \
+ Docker is a great building block for automating distributed systems: \
+ large-scale web deployments, database clusters, continuous deployment \
+ systems, private PaaS, service-oriented architectures, etc. \
+ . \
+ This package contains the daemon and client. Using docker.io on non-amd64 \
+ hosts is not supported at this time. Please be careful when using it \
+ on anything besides amd64. \
+ . \
+ Also, note that kernel version 3.8 or above is required for proper \
+ operation of the daemon process, and that any lower versions may have \
+ subtle and/or glaring issues. \
+ "
+
+SRCREV = "76d6bc9a9f1690e16f3721ba165364688b626de2"
+SRC_URI = "\
+ git://github.com/docker/docker.git;nobranch=1 \
+ file://docker.service \
+ file://docker.init \
+ file://hi.Dockerfile \
+ file://disable_sha1sum_startup.patch \
+ file://Bump-bolt-to-v1.1.0.patch \
+ "
+
+# Apache-2.0 for docker
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=cc2221abf0b96ea39dd68141b70f7937"
+
+S = "${WORKDIR}/git"
+
+DOCKER_VERSION = "1.9.0"
+PV = "${DOCKER_VERSION}+git${SRCREV}"
+
+DEPENDS = "go-cross \
+ go-cli \
+ go-pty \
+ go-context \
+ go-mux \
+ go-patricia \
+ go-libtrust \
+ go-logrus \
+ go-fsnotify \
+ go-dbus \
+ go-capability \
+ go-systemd \
+ btrfs-tools \
+ sqlite3 \
+ go-distribution-digest \
+ "
+
+DEPENDS_append_class-target = "lvm2"
+RDEPENDS_${PN} = "curl aufs-util git cgroup-lite util-linux iptables"
+RRECOMMENDS_${PN} = "lxc docker-registry rt-tests"
+RRECOMMENDS_${PN} += " kernel-module-dm-thin-pool kernel-module-nf-nat"
+DOCKER_PKG="github.com/docker/docker"
+
+do_configure[noexec] = "1"
+
+do_compile() {
+ export GOARCH="${TARGET_ARCH}"
+ # supported amd64, 386, arm arm64
+ if [ "${TARGET_ARCH}" = "x86_64" ]; then
+ export GOARCH="amd64"
+ fi
+ if [ "${TARGET_ARCH}" = "aarch64" ]; then
+ export GOARCH="arm64"
+ fi
+
+ # Set GOPATH. See 'PACKAGERS.md'. Don't rely on
+ # docker to download its dependencies but rather
+ # use dependencies packaged independently.
+ cd ${S}
+ rm -rf .gopath
+ mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
+ ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
+ export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+ cd -
+
+ # Pass the needed cflags/ldflags so that cgo
+ # can find the needed headers files and libraries
+ export CGO_ENABLED="1"
+ export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+ # in order to exclude devicemapper and btrfs - https://github.com/docker/docker/issues/14056
+ export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
+
+ # this is the unsupported built structure
+ # that doesn't rely on an existing docker
+ # to build this:
+ DOCKER_GITCOMMIT="${SRCREV}" \
+ ./hack/make.sh dynbinary
+}
+
+inherit systemd update-rc.d
+
+SYSTEMD_PACKAGES = "${@base_contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@base_contains('DISTRO_FEATURES','systemd','docker.service','',d)}"
+
+INITSCRIPT_PACKAGES += "${@base_contains('DISTRO_FEATURES','sysvinit','${PN}','',d)}"
+INITSCRIPT_NAME_${PN} = "${@base_contains('DISTRO_FEATURES','sysvinit','docker.init','',d)}"
+INITSCRIPT_PARAMS_${PN} = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+
+do_install() {
+ mkdir -p ${D}/${bindir}
+ cp ${S}/bundles/${DOCKER_VERSION}/dynbinary/docker-${DOCKER_VERSION} \
+ ${D}/${bindir}/docker
+ cp ${S}/bundles/${DOCKER_VERSION}/dynbinary/dockerinit-${DOCKER_VERSION} \
+ ${D}/${bindir}/dockerinit
+
+ if ${@base_contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+ install -d ${D}${systemd_unitdir}/system
+ install -m 644 ${S}/contrib/init/systemd/docker.* ${D}/${systemd_unitdir}/system
+ # replaces one copied from above with one that uses the local registry for a mirror
+ install -m 644 ${WORKDIR}/docker.service ${D}/${systemd_unitdir}/system
+ else
+ install -d ${D}${sysconfdir}/init.d
+ install -m 0755 ${WORKDIR}/docker.init ${D}${sysconfdir}/init.d/docker.init
+ fi
+
+ mkdir -p ${D}/usr/share/docker/
+ cp ${WORKDIR}/hi.Dockerfile ${D}/usr/share/docker/
+}
+
+inherit useradd
+USERADD_PACKAGES = "${PN}"
+GROUPADD_PARAM_${PN} = "-r docker"
+
+FILES_${PN} += "/lib/systemd/system/*"
+
+# DO NOT STRIP docker and dockerinit!!!
+#
+# Reason:
+# The "docker" package contains two binaries: "docker" and "dockerinit",
+# which are both written in Go. The "dockerinit" package is built first,
+# then its checksum is given to the build process compiling the "docker"
+# binary. Hence the checksum of the unstripped "dockerinit" binary is hard
+# coded into the "docker" binary. At runtime the "docker" binary invokes
+# the "dockerinit" binary, but before doing that it ensures the checksum
+# of "dockerinit" matches with the hard coded value.
+#
+INHIBIT_PACKAGE_STRIP = "1"
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch
new file mode 100644
index 0000000..ca4ad81
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch
@@ -0,0 +1,1828 @@
+From a41917c2c88bd7f694d141ac67f4a194aaa16fa1 Mon Sep 17 00:00:00 2001
+From: Qiang Huang <h.huangqiang@huawei.com>
+Date: Wed, 28 Oct 2015 08:49:45 +0800
+Subject: [PATCH] Bump bolt to v1.1.0
+
+It adds ARM64, ppc64le, s390x, solaris support, and a bunch of
+bugfixs.
+
+Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
+---
+ hack/vendor.sh | 2 +-
+ vendor/src/github.com/boltdb/bolt/.gitignore | 1 +
+ vendor/src/github.com/boltdb/bolt/README.md | 250 +++++++++++++++++++--
+ vendor/src/github.com/boltdb/bolt/batch.go | 138 ++++++++++++
+ vendor/src/github.com/boltdb/bolt/bolt_386.go | 5 +-
+ vendor/src/github.com/boltdb/bolt/bolt_amd64.go | 3 +
+ vendor/src/github.com/boltdb/bolt/bolt_arm.go | 5 +-
+ vendor/src/github.com/boltdb/bolt/bolt_arm64.go | 9 +
+ vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go | 9 +
+ vendor/src/github.com/boltdb/bolt/bolt_s390x.go | 9 +
+ vendor/src/github.com/boltdb/bolt/bolt_unix.go | 37 ++-
+ .../github.com/boltdb/bolt/bolt_unix_solaris.go | 101 +++++++++
+ vendor/src/github.com/boltdb/bolt/bolt_windows.go | 10 +-
+ vendor/src/github.com/boltdb/bolt/bucket.go | 29 ++-
+ vendor/src/github.com/boltdb/bolt/cursor.go | 12 +-
+ vendor/src/github.com/boltdb/bolt/db.go | 195 ++++++++++++----
+ vendor/src/github.com/boltdb/bolt/errors.go | 4 +
+ vendor/src/github.com/boltdb/bolt/freelist.go | 28 ++-
+ vendor/src/github.com/boltdb/bolt/node.go | 36 ++-
+ vendor/src/github.com/boltdb/bolt/page.go | 45 +++-
+ vendor/src/github.com/boltdb/bolt/tx.go | 80 +++++--
+ 21 files changed, 886 insertions(+), 122 deletions(-)
+ create mode 100644 vendor/src/github.com/boltdb/bolt/batch.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm64.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_s390x.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
+
+diff --git a/hack/vendor.sh b/hack/vendor.sh
+index d872d4a..c28e677 100755
+--- a/hack/vendor.sh
++++ b/hack/vendor.sh
+@@ -36,7 +36,7 @@ clone git github.com/coreos/etcd v2.2.0
+ fix_rewritten_imports github.com/coreos/etcd
+ clone git github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec
+ clone git github.com/hashicorp/consul v0.5.2
+-clone git github.com/boltdb/bolt v1.0
++clone git github.com/boltdb/bolt v1.1.0
+
+ # get graph and distribution packages
+ clone git github.com/docker/distribution 20c4b7a1805a52753dfd593ee1cc35558722a0ce # docker/1.9 branch
+diff --git a/vendor/src/github.com/boltdb/bolt/.gitignore b/vendor/src/github.com/boltdb/bolt/.gitignore
+index b2bb382..c7bd2b7 100644
+--- a/vendor/src/github.com/boltdb/bolt/.gitignore
++++ b/vendor/src/github.com/boltdb/bolt/.gitignore
+@@ -1,3 +1,4 @@
+ *.prof
+ *.test
++*.swp
+ /bin/
+diff --git a/vendor/src/github.com/boltdb/bolt/README.md b/vendor/src/github.com/boltdb/bolt/README.md
+index 727e977..0a33ebc 100644
+--- a/vendor/src/github.com/boltdb/bolt/README.md
++++ b/vendor/src/github.com/boltdb/bolt/README.md
+@@ -16,7 +16,7 @@ and setting values. That's it.
+
+ ## Project Status
+
+-Bolt is stable and the API is fixed. Full unit test coverage and randomized
++Bolt is stable and the API is fixed. Full unit test coverage and randomized
+ black box testing are used to ensure database consistency and thread safety.
+ Bolt is currently in high-load production environments serving databases as
+ large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
+@@ -87,6 +87,11 @@ are not thread safe. To work with data in multiple goroutines you must start
+ a transaction for each one or use locking to ensure only one goroutine accesses
+ a transaction at a time. Creating transaction from the `DB` is thread safe.
+
++Read-only transactions and read-write transactions should not depend on one
++another and generally shouldn't be opened simultaneously in the same goroutine.
++This can cause a deadlock as the read-write transaction needs to periodically
++re-map the data file but it cannot do so while a read-only transaction is open.
++
+
+ #### Read-write transactions
+
+@@ -120,12 +125,88 @@ err := db.View(func(tx *bolt.Tx) error {
+ })
+ ```
+
+-You also get a consistent view of the database within this closure, however,
++You also get a consistent view of the database within this closure, however,
+ no mutating operations are allowed within a read-only transaction. You can only
+ retrieve buckets, retrieve values, and copy the database within a read-only
+ transaction.
+
+
++#### Batch read-write transactions
++
++Each `DB.Update()` waits for disk to commit the writes. This overhead
++can be minimized by combining multiple updates with the `DB.Batch()`
++function:
++
++```go
++err := db.Batch(func(tx *bolt.Tx) error {
++ ...
++ return nil
++})
++```
++
++Concurrent Batch calls are opportunistically combined into larger
++transactions. Batch is only useful when there are multiple goroutines
++calling it.
++
++The trade-off is that `Batch` can call the given
++function multiple times, if parts of the transaction fail. The
++function must be idempotent and side effects must take effect only
++after a successful return from `DB.Batch()`.
++
++For example: don't display messages from inside the function, instead
++set variables in the enclosing scope:
++
++```go
++var id uint64
++err := db.Batch(func(tx *bolt.Tx) error {
++ // Find last key in bucket, decode as bigendian uint64, increment
++ // by one, encode back to []byte, and add new key.
++ ...
++ id = newValue
++ return nil
++})
++if err != nil {
++ return ...
++}
++fmt.Println("Allocated ID %d", id)
++```
++
++
++#### Managing transactions manually
++
++The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
++function. These helper functions will start the transaction, execute a function,
++and then safely close your transaction if an error is returned. This is the
++recommended way to use Bolt transactions.
++
++However, sometimes you may want to manually start and end your transactions.
++You can use the `Tx.Begin()` function directly but _please_ be sure to close the
++transaction.
++
++```go
++// Start a writable transaction.
++tx, err := db.Begin(true)
++if err != nil {
++ return err
++}
++defer tx.Rollback()
++
++// Use the transaction...
++_, err := tx.CreateBucket([]byte("MyBucket"))
++if err != nil {
++ return err
++}
++
++// Commit the transaction and check for error.
++if err := tx.Commit(); err != nil {
++ return err
++}
++```
++
++The first argument to `DB.Begin()` is a boolean stating if the transaction
++should be writable.
++
++
+ ### Using buckets
+
+ Buckets are collections of key/value pairs within the database. All keys in a
+@@ -175,13 +256,61 @@ db.View(func(tx *bolt.Tx) error {
+ ```
+
+ The `Get()` function does not return an error because its operation is
+-guarenteed to work (unless there is some kind of system failure). If the key
++guaranteed to work (unless there is some kind of system failure). If the key
+ exists then it will return its byte slice value. If it doesn't exist then it
+ will return `nil`. It's important to note that you can have a zero-length value
+ set to a key which is different than the key not existing.
+
+ Use the `Bucket.Delete()` function to delete a key from the bucket.
+
++Please note that values returned from `Get()` are only valid while the
++transaction is open. If you need to use a value outside of the transaction
++then you must use `copy()` to copy it to another byte slice.
++
++
++### Autoincrementing integer for the bucket
++By using the NextSequence() function, you can let Bolt determine a sequence
++which can be used as the unique identifier for your key/value pairs. See the
++example below.
++
++```go
++// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
++func (s *Store) CreateUser(u *User) error {
++ return s.db.Update(func(tx *bolt.Tx) error {
++ // Retrieve the users bucket.
++ // This should be created when the DB is first opened.
++ b := tx.Bucket([]byte("users"))
++
++ // Generate ID for the user.
++ // This returns an error only if the Tx is closed or not writeable.
++ // That can't happen in an Update() call so I ignore the error check.
++ id, _ = b.NextSequence()
++ u.ID = int(id)
++
++ // Marshal user data into bytes.
++ buf, err := json.Marshal(u)
++ if err != nil {
++ return err
++ }
++
++ // Persist bytes to users bucket.
++ return b.Put(itob(u.ID), buf)
++ })
++}
++
++// itob returns an 8-byte big endian representation of v.
++func itob(v int) []byte {
++ b := make([]byte, 8)
++ binary.BigEndian.PutUint64(b, uint64(v))
++ return b
++}
++
++type User struct {
++ ID int
++ ...
++}
++
++```
+
+ ### Iterating over keys
+
+@@ -254,7 +383,7 @@ db.View(func(tx *bolt.Tx) error {
+ max := []byte("2000-01-01T00:00:00Z")
+
+ // Iterate over the 90's.
+- for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) != -1; k, v = c.Next() {
++ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+ fmt.Printf("%s: %s\n", k, v)
+ }
+
+@@ -294,7 +423,7 @@ func (*Bucket) DeleteBucket(key []byte) error
+
+ ### Database backups
+
+-Bolt is a single file so it's easy to backup. You can use the `Tx.Copy()`
++Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
+ function to write a consistent view of the database to a writer. If you call
+ this from a read-only transaction, it will perform a hot backup and not block
+ your other database reads and writes. It will also use `O_DIRECT` when available
+@@ -305,11 +434,12 @@ do database backups:
+
+ ```go
+ func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+- err := db.View(func(tx bolt.Tx) error {
++ err := db.View(func(tx *bolt.Tx) error {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+ w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+- return tx.Copy(w)
++ _, err := tx.WriteTo(w)
++ return err
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+@@ -351,14 +481,13 @@ go func() {
+ // Grab the current stats and diff them.
+ stats := db.Stats()
+ diff := stats.Sub(&prev)
+-
++
+ // Encode stats to JSON and print to STDERR.
+ json.NewEncoder(os.Stderr).Encode(diff)
+
+ // Save stats for the next loop.
+ prev = stats
+ }
+-}
+ }()
+ ```
+
+@@ -366,25 +495,83 @@ It's also useful to pipe these stats to a service such as statsd for monitoring
+ or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
++### Read-Only Mode
++
++Sometimes it is useful to create a shared, read-only Bolt database. To this,
++set the `Options.ReadOnly` flag when opening your database. Read-only mode
++uses a shared lock to allow multiple processes to read from the database but
++it will block any processes from opening the database in read-write mode.
++
++```go
++db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
++if err != nil {
++ log.Fatal(err)
++}
++```
++
++
+ ## Resources
+
+ For more information on getting started with Bolt, check out the following articles:
+
+ * [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
++* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
++
++
++## Comparison with other databases
++
++### Postgres, MySQL, & other relational databases
++
++Relational databases structure data into rows and are only accessible through
++the use of SQL. This approach provides flexibility in how you store and query
++your data but also incurs overhead in parsing and planning SQL statements. Bolt
++accesses all data by a byte slice key. This makes Bolt fast to read and write
++data by key but provides no built-in support for joining values together.
++
++Most relational databases (with the exception of SQLite) are standalone servers
++that run separately from your application. This gives your systems
++flexibility to connect multiple application servers to a single database
++server but also adds overhead in serializing and transporting data over the
++network. Bolt runs as a library included in your application so all data access
++has to go through your application's process. This brings data closer to your
++application but limits multi-process access to the data.
++
++
++### LevelDB, RocksDB
+
++LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
++they are libraries bundled into the application, however, their underlying
++structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
++random writes by using a write ahead log and multi-tiered, sorted files called
++SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
++have trade offs.
+
++If you require a high random write throughput (>10,000 w/sec) or you need to use
++spinning disks then LevelDB could be a good choice. If your application is
++read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+-## Comparing Bolt to LMDB
++One other important consideration is that LevelDB does not have transactions.
++It supports batch writing of key/values pairs and it supports read snapshots
++but it will not give you the ability to do a compare-and-swap operation safely.
++Bolt supports fully serializable ACID transactions.
++
++
++### LMDB
+
+ Bolt was originally a port of LMDB so it is architecturally similar. Both use
+-a B+tree, have ACID semanetics with fully serializable transactions, and support
++a B+tree, have ACID semantics with fully serializable transactions, and support
+ lock-free MVCC using a single writer and multiple readers.
+
+ The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+ while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+-several unsafe actions such as direct writes and append writes for the sake of
+-performance. Bolt opts to disallow actions which can leave the database in a
+-corrupted state. The only exception to this in Bolt is `DB.NoSync`.
++several unsafe actions such as direct writes for the sake of performance. Bolt
++opts to disallow actions which can leave the database in a corrupted state. The
++only exception to this in Bolt is `DB.NoSync`.
++
++There are also a few differences in API. LMDB requires a maximum mmap size when
++opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
++automatically. LMDB overloads the getter and setter functions with multiple
++flags whereas Bolt splits these specialized cases into their own functions.
+
+
+ ## Caveats & Limitations
+@@ -425,14 +612,33 @@ Here are a few things to note when evaluating and using Bolt:
+ can in memory and will release memory as needed to other processes. This means
+ that Bolt can show very high memory usage when working with large databases.
+ However, this is expected and the OS will release memory as needed. Bolt can
+- handle databases much larger than the available physical RAM.
++ handle databases much larger than the available physical RAM, provided its
++ memory-map fits in the process virtual address space. It may be problematic
++ on 32-bits systems.
++
++* The data structures in the Bolt database are memory mapped so the data file
++ will be endian specific. This means that you cannot copy a Bolt file from a
++ little endian machine to a big endian machine and have it work. For most
++ users this is not a concern since most modern CPUs are little endian.
++
++* Because of the way pages are laid out on disk, Bolt cannot truncate data files
++ and return free pages back to the disk. Instead, Bolt maintains a free list
++ of unused pages within its data file. These free pages can be reused by later
++ transactions. This works well for many use cases as databases generally tend
++ to grow. However, it's important to note that deleting large chunks of data
++ will not allow you to reclaim that space on disk.
++
++ For more information on page allocation, [see this comment][page-allocation].
++
++[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+ ## Other Projects Using Bolt
+
+ Below is a list of public, open source projects that use Bolt:
+
+-* [Bazil](https://github.com/bazillion/bazil) - A file system that lets your data reside where it is most convenient for it to reside.
++* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
++* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+ * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+ * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+ * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+@@ -450,6 +656,16 @@ Below is a list of public, open source projects that use Bolt:
+ * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+ * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+ * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
++* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
++* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
++* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
++* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
++* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
++* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
++* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
++* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
++ backed by boltdb.
++* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
++ simple tx and key scans.
+
+ If you are using Bolt in a project please send a pull request to add it to the list.
+-
+diff --git a/vendor/src/github.com/boltdb/bolt/batch.go b/vendor/src/github.com/boltdb/bolt/batch.go
+new file mode 100644
+index 0000000..84acae6
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/batch.go
+@@ -0,0 +1,138 @@
++package bolt
++
++import (
++ "errors"
++ "fmt"
++ "sync"
++ "time"
++)
++
++// Batch calls fn as part of a batch. It behaves similar to Update,
++// except:
++//
++// 1. concurrent Batch calls can be combined into a single Bolt
++// transaction.
++//
++// 2. the function passed to Batch may be called multiple times,
++// regardless of whether it returns error or not.
++//
++// This means that Batch function side effects must be idempotent and
++// take permanent effect only after a successful return is seen in
++// caller.
++//
++// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
++// and DB.MaxBatchDelay, respectively.
++//
++// Batch is only useful when there are multiple goroutines calling it.
++func (db *DB) Batch(fn func(*Tx) error) error {
++ errCh := make(chan error, 1)
++
++ db.batchMu.Lock()
++ if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
++ // There is no existing batch, or the existing batch is full; start a new one.
++ db.batch = &batch{
++ db: db,
++ }
++ db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
++ }
++ db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
++ if len(db.batch.calls) >= db.MaxBatchSize {
++ // wake up batch, it's ready to run
++ go db.batch.trigger()
++ }
++ db.batchMu.Unlock()
++
++ err := <-errCh
++ if err == trySolo {
++ err = db.Update(fn)
++ }
++ return err
++}
++
++type call struct {
++ fn func(*Tx) error
++ err chan<- error
++}
++
++type batch struct {
++ db *DB
++ timer *time.Timer
++ start sync.Once
++ calls []call
++}
++
++// trigger runs the batch if it hasn't already been run.
++func (b *batch) trigger() {
++ b.start.Do(b.run)
++}
++
++// run performs the transactions in the batch and communicates results
++// back to DB.Batch.
++func (b *batch) run() {
++ b.db.batchMu.Lock()
++ b.timer.Stop()
++ // Make sure no new work is added to this batch, but don't break
++ // other batches.
++ if b.db.batch == b {
++ b.db.batch = nil
++ }
++ b.db.batchMu.Unlock()
++
++retry:
++ for len(b.calls) > 0 {
++ var failIdx = -1
++ err := b.db.Update(func(tx *Tx) error {
++ for i, c := range b.calls {
++ if err := safelyCall(c.fn, tx); err != nil {
++ failIdx = i
++ return err
++ }
++ }
++ return nil
++ })
++
++ if failIdx >= 0 {
++ // take the failing transaction out of the batch. it's
++ // safe to shorten b.calls here because db.batch no longer
++ // points to us, and we hold the mutex anyway.
++ c := b.calls[failIdx]
++ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
++ // tell the submitter re-run it solo, continue with the rest of the batch
++ c.err <- trySolo
++ continue retry
++ }
++
++ // pass success, or bolt internal errors, to all callers
++ for _, c := range b.calls {
++ if c.err != nil {
++ c.err <- err
++ }
++ }
++ break retry
++ }
++}
++
++// trySolo is a special sentinel error value used for signaling that a
++// transaction function should be re-run. It should never be seen by
++// callers.
++var trySolo = errors.New("batch function returned an error and should be re-run solo")
++
++type panicked struct {
++ reason interface{}
++}
++
++func (p panicked) Error() string {
++ if err, ok := p.reason.(error); ok {
++ return err.Error()
++ }
++ return fmt.Sprintf("panic: %v", p.reason)
++}
++
++func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
++ defer func() {
++ if p := recover(); p != nil {
++ err = panicked{p}
++ }
++ }()
++ return fn(tx)
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_386.go b/vendor/src/github.com/boltdb/bolt/bolt_386.go
+index 856f401..e659bfb 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_386.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_386.go
+@@ -1,4 +1,7 @@
+ package bolt
+
+ // maxMapSize represents the largest mmap size supported by Bolt.
+-const maxMapSize = 0xFFFFFFF // 256MB
++const maxMapSize = 0x7FFFFFFF // 2GB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0xFFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
+index 4262932..cca6b7e 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
+@@ -2,3 +2,6 @@ package bolt
+
+ // maxMapSize represents the largest mmap size supported by Bolt.
+ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm.go b/vendor/src/github.com/boltdb/bolt/bolt_arm.go
+index 856f401..e659bfb 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_arm.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_arm.go
+@@ -1,4 +1,7 @@
+ package bolt
+
+ // maxMapSize represents the largest mmap size supported by Bolt.
+-const maxMapSize = 0xFFFFFFF // 256MB
++const maxMapSize = 0x7FFFFFFF // 2GB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0xFFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go
+new file mode 100644
+index 0000000..6d23093
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go
+@@ -0,0 +1,9 @@
++// +build arm64
++
++package bolt
++
++// maxMapSize represents the largest mmap size supported by Bolt.
++const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
+new file mode 100644
+index 0000000..8351e12
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
+@@ -0,0 +1,9 @@
++// +build ppc64le
++
++package bolt
++
++// maxMapSize represents the largest mmap size supported by Bolt.
++const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go
+new file mode 100644
+index 0000000..f4dd26b
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go
+@@ -0,0 +1,9 @@
++// +build s390x
++
++package bolt
++
++// maxMapSize represents the largest mmap size supported by Bolt.
++const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix.go b/vendor/src/github.com/boltdb/bolt/bolt_unix.go
+index 95647a7..6eef6b2 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_unix.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_unix.go
+@@ -1,8 +1,9 @@
+-// +build !windows,!plan9
++// +build !windows,!plan9,!solaris
+
+ package bolt
+
+ import (
++ "fmt"
+ "os"
+ "syscall"
+ "time"
+@@ -10,7 +11,7 @@ import (
+ )
+
+ // flock acquires an advisory lock on a file descriptor.
+-func flock(f *os.File, timeout time.Duration) error {
++func flock(f *os.File, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ for {
+ // If we're beyond our timeout then return an error.
+@@ -20,9 +21,13 @@ func flock(f *os.File, timeout time.Duration) error {
+ } else if timeout > 0 && time.Since(t) > timeout {
+ return ErrTimeout
+ }
++ flag := syscall.LOCK_SH
++ if exclusive {
++ flag = syscall.LOCK_EX
++ }
+
+ // Otherwise attempt to obtain an exclusive lock.
+- err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
++ err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
+ if err == nil {
+ return nil
+ } else if err != syscall.EWOULDBLOCK {
+@@ -41,11 +46,28 @@ func funlock(f *os.File) error {
+
+ // mmap memory maps a DB's data file.
+ func mmap(db *DB, sz int) error {
++ // Truncate and fsync to ensure file size metadata is flushed.
++ // https://github.com/boltdb/bolt/issues/284
++ if !db.NoGrowSync && !db.readOnly {
++ if err := db.file.Truncate(int64(sz)); err != nil {
++ return fmt.Errorf("file resize error: %s", err)
++ }
++ if err := db.file.Sync(); err != nil {
++ return fmt.Errorf("file sync error: %s", err)
++ }
++ }
++
++ // Map the data file to memory.
+ b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+
++ // Advise the kernel that the mmap is accessed randomly.
++ if err := madvise(b, syscall.MADV_RANDOM); err != nil {
++ return fmt.Errorf("madvise: %s", err)
++ }
++
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+@@ -67,3 +89,12 @@ func munmap(db *DB) error {
+ db.datasz = 0
+ return err
+ }
++
++// NOTE: This function is copied from stdlib because it is not available on darwin.
++func madvise(b []byte, advice int) (err error) {
++ _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
++ if e1 != 0 {
++ err = e1
++ }
++ return
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
+new file mode 100644
+index 0000000..f480ee7
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
+@@ -0,0 +1,101 @@
++
++package bolt
++
++import (
++ "fmt"
++ "os"
++ "syscall"
++ "time"
++ "unsafe"
++ "golang.org/x/sys/unix"
++)
++
++// flock acquires an advisory lock on a file descriptor.
++func flock(f *os.File, exclusive bool, timeout time.Duration) error {
++ var t time.Time
++ for {
++ // If we're beyond our timeout then return an error.
++ // This can only occur after we've attempted a flock once.
++ if t.IsZero() {
++ t = time.Now()
++ } else if timeout > 0 && time.Since(t) > timeout {
++ return ErrTimeout
++ }
++ var lock syscall.Flock_t
++ lock.Start = 0
++ lock.Len = 0
++ lock.Pid = 0
++ lock.Whence = 0
++ lock.Pid = 0
++ if exclusive {
++ lock.Type = syscall.F_WRLCK
++ } else {
++ lock.Type = syscall.F_RDLCK
++ }
++ err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
++ if err == nil {
++ return nil
++ } else if err != syscall.EAGAIN {
++ return err
++ }
++
++ // Wait for a bit and try again.
++ time.Sleep(50 * time.Millisecond)
++ }
++}
++
++// funlock releases an advisory lock on a file descriptor.
++func funlock(f *os.File) error {
++ var lock syscall.Flock_t
++ lock.Start = 0
++ lock.Len = 0
++ lock.Type = syscall.F_UNLCK
++ lock.Whence = 0
++ return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
++}
++
++// mmap memory maps a DB's data file.
++func mmap(db *DB, sz int) error {
++ // Truncate and fsync to ensure file size metadata is flushed.
++ // https://github.com/boltdb/bolt/issues/284
++ if !db.NoGrowSync && !db.readOnly {
++ if err := db.file.Truncate(int64(sz)); err != nil {
++ return fmt.Errorf("file resize error: %s", err)
++ }
++ if err := db.file.Sync(); err != nil {
++ return fmt.Errorf("file sync error: %s", err)
++ }
++ }
++
++ // Map the data file to memory.
++ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
++ if err != nil {
++ return err
++ }
++
++ // Advise the kernel that the mmap is accessed randomly.
++ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
++ return fmt.Errorf("madvise: %s", err)
++ }
++
++ // Save the original byte slice and convert to a byte array pointer.
++ db.dataref = b
++ db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
++ db.datasz = sz
++ return nil
++}
++
++// munmap unmaps a DB's data file from memory.
++func munmap(db *DB) error {
++ // Ignore the unmap if we have no mapped data.
++ if db.dataref == nil {
++ return nil
++ }
++
++ // Unmap using the original byte slice.
++ err := unix.Munmap(db.dataref)
++ db.dataref = nil
++ db.data = nil
++ db.datasz = 0
++ return err
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_windows.go b/vendor/src/github.com/boltdb/bolt/bolt_windows.go
+index c8539d4..8b782be 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_windows.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_windows.go
+@@ -16,7 +16,7 @@ func fdatasync(db *DB) error {
+ }
+
+ // flock acquires an advisory lock on a file descriptor.
+-func flock(f *os.File, _ time.Duration) error {
++func flock(f *os.File, _ bool, _ time.Duration) error {
+ return nil
+ }
+
+@@ -28,9 +28,11 @@ func funlock(f *os.File) error {
+ // mmap memory maps a DB's data file.
+ // Based on: https://github.com/edsrzf/mmap-go
+ func mmap(db *DB, sz int) error {
+- // Truncate the database to the size of the mmap.
+- if err := db.file.Truncate(int64(sz)); err != nil {
+- return fmt.Errorf("truncate: %s", err)
++ if !db.readOnly {
++ // Truncate the database to the size of the mmap.
++ if err := db.file.Truncate(int64(sz)); err != nil {
++ return fmt.Errorf("truncate: %s", err)
++ }
+ }
+
+ // Open a file mapping handle.
+diff --git a/vendor/src/github.com/boltdb/bolt/bucket.go b/vendor/src/github.com/boltdb/bolt/bucket.go
+index 2630800..2925288 100644
+--- a/vendor/src/github.com/boltdb/bolt/bucket.go
++++ b/vendor/src/github.com/boltdb/bolt/bucket.go
+@@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor {
+
+ // Bucket retrieves a nested bucket by name.
+ // Returns nil if the bucket does not exist.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (b *Bucket) Bucket(name []byte) *Bucket {
+ if b.buckets != nil {
+ if child := b.buckets[string(name)]; child != nil {
+@@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
+
+ // CreateBucket creates a new bucket at the given key and returns the new bucket.
+ // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+ if b.tx.db == nil {
+ return nil, ErrTxClosed
+@@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+
+ // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+ // Returns an error if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+ child, err := b.CreateBucket(key)
+ if err == ErrBucketExists {
+@@ -252,6 +255,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
+
+ // Get retrieves the value for a key in the bucket.
+ // Returns a nil value if the key does not exist or if the key is a nested bucket.
++// The returned value is only valid for the life of the transaction.
+ func (b *Bucket) Get(key []byte) []byte {
+ k, v, flags := b.Cursor().seek(key)
+
+@@ -332,6 +336,12 @@ func (b *Bucket) NextSequence() (uint64, error) {
+ return 0, ErrTxNotWritable
+ }
+
++ // Materialize the root node if it hasn't been already so that the
++ // bucket will be saved during commit.
++ if b.rootNode == nil {
++ _ = b.node(b.root, nil)
++ }
++
+ // Increment and return the sequence.
+ b.bucket.sequence++
+ return b.bucket.sequence, nil
+@@ -339,7 +349,8 @@ func (b *Bucket) NextSequence() (uint64, error) {
+
+ // ForEach executes a function for each key/value pair in a bucket.
+ // If the provided function returns an error then the iteration is stopped and
+-// the error is returned to the caller.
++// the error is returned to the caller. The provided function must not modify
++// the bucket; this will result in undefined behavior.
+ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+@@ -511,8 +522,12 @@ func (b *Bucket) spill() error {
+ // Update parent node.
+ var c = b.Cursor()
+ k, _, flags := c.seek([]byte(name))
+- _assert(bytes.Equal([]byte(name), k), "misplaced bucket header: %x -> %x", []byte(name), k)
+- _assert(flags&bucketLeafFlag != 0, "unexpected bucket header flag: %x", flags)
++ if !bytes.Equal([]byte(name), k) {
++ panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
++ }
++ if flags&bucketLeafFlag == 0 {
++ panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
++ }
+ c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ }
+
+@@ -528,7 +543,9 @@ func (b *Bucket) spill() error {
+ b.rootNode = b.rootNode.root()
+
+ // Update the root node for this bucket.
+- _assert(b.rootNode.pgid < b.tx.meta.pgid, "pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)
++ if b.rootNode.pgid >= b.tx.meta.pgid {
++ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
++ }
+ b.root = b.rootNode.pgid
+
+ return nil
+@@ -659,7 +676,9 @@ func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.root == 0 {
+- _assert(id == 0, "inline bucket non-zero page access(2): %d != 0", id)
++ if id != 0 {
++ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
++ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+diff --git a/vendor/src/github.com/boltdb/bolt/cursor.go b/vendor/src/github.com/boltdb/bolt/cursor.go
+index 3bfc2f1..006c548 100644
+--- a/vendor/src/github.com/boltdb/bolt/cursor.go
++++ b/vendor/src/github.com/boltdb/bolt/cursor.go
+@@ -2,6 +2,7 @@ package bolt
+
+ import (
+ "bytes"
++ "fmt"
+ "sort"
+ )
+
+@@ -9,6 +10,8 @@ import (
+ // Cursors see nested buckets with value == nil.
+ // Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+ //
++// Keys and values returned from the cursor are only valid for the life of the transaction.
++//
+ // Changing data while traversing with a cursor may cause it to be invalidated
+ // and return unexpected keys and/or values. You must reposition your cursor
+ // after mutating data.
+@@ -24,6 +27,7 @@ func (c *Cursor) Bucket() *Bucket {
+
+ // First moves the cursor to the first item in the bucket and returns its key and value.
+ // If the bucket is empty then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) First() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+@@ -40,6 +44,7 @@ func (c *Cursor) First() (key []byte, value []byte) {
+
+ // Last moves the cursor to the last item in the bucket and returns its key and value.
+ // If the bucket is empty then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Last() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+@@ -57,6 +62,7 @@ func (c *Cursor) Last() (key []byte, value []byte) {
+
+ // Next moves the cursor to the next item in the bucket and returns its key and value.
+ // If the cursor is at the end of the bucket then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Next() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.next()
+@@ -68,6 +74,7 @@ func (c *Cursor) Next() (key []byte, value []byte) {
+
+ // Prev moves the cursor to the previous item in the bucket and returns its key and value.
+ // If the cursor is at the beginning of the bucket then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Prev() (key []byte, value []byte) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
+
+@@ -99,6 +106,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) {
+ // Seek moves the cursor to a given key and returns it.
+ // If the key does not exist then the next key is used. If no keys
+ // follow, a nil key is returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+ k, v, flags := c.seek(seek)
+
+@@ -228,8 +236,8 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+ // search recursively performs a binary search against a given page/node until it finds a given key.
+ func (c *Cursor) search(key []byte, pgid pgid) {
+ p, n := c.bucket.pageNode(pgid)
+- if p != nil {
+- _assert((p.flags&(branchPageFlag|leafPageFlag)) != 0, "invalid page type: %d: %x", p.id, p.flags)
++ if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
++ panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
+ }
+ e := elemRef{page: p, node: n}
+ c.stack = append(c.stack, e)
+diff --git a/vendor/src/github.com/boltdb/bolt/db.go b/vendor/src/github.com/boltdb/bolt/db.go
+index 6c45736..d39c4aa 100644
+--- a/vendor/src/github.com/boltdb/bolt/db.go
++++ b/vendor/src/github.com/boltdb/bolt/db.go
+@@ -12,9 +12,6 @@ import (
+ "unsafe"
+ )
+
+-// The smallest size that the mmap can be.
+-const minMmapSize = 1 << 22 // 4MB
+-
+ // The largest step that can be taken when remapping the mmap.
+ const maxMmapStep = 1 << 30 // 1GB
+
+@@ -30,6 +27,12 @@ const magic uint32 = 0xED0CDAED
+ // must be synchronzied using the msync(2) syscall.
+ const IgnoreNoSync = runtime.GOOS == "openbsd"
+
++// Default values if not set in a DB instance.
++const (
++ DefaultMaxBatchSize int = 1000
++ DefaultMaxBatchDelay = 10 * time.Millisecond
++)
++
+ // DB represents a collection of buckets persisted to a file on disk.
+ // All data access is performed through transactions which can be obtained through the DB.
+ // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
+@@ -52,9 +55,33 @@ type DB struct {
+ // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ NoSync bool
+
++ // When true, skips the truncate call when growing the database.
++ // Setting this to true is only safe on non-ext3/ext4 systems.
++ // Skipping truncation avoids preallocation of hard drive space and
++ // bypasses a truncate() and fsync() syscall on remapping.
++ //
++ // https://github.com/boltdb/bolt/issues/284
++ NoGrowSync bool
++
++ // MaxBatchSize is the maximum size of a batch. Default value is
++ // copied from DefaultMaxBatchSize in Open.
++ //
++ // If <=0, disables batching.
++ //
++ // Do not change concurrently with calls to Batch.
++ MaxBatchSize int
++
++ // MaxBatchDelay is the maximum delay before a batch starts.
++ // Default value is copied from DefaultMaxBatchDelay in Open.
++ //
++ // If <=0, effectively disables batching.
++ //
++ // Do not change concurrently with calls to Batch.
++ MaxBatchDelay time.Duration
++
+ path string
+ file *os.File
+- dataref []byte
++ dataref []byte // mmap'ed readonly, write throws SEGV
+ data *[maxMapSize]byte
+ datasz int
+ meta0 *meta
+@@ -66,6 +93,9 @@ type DB struct {
+ freelist *freelist
+ stats Stats
+
++ batchMu sync.Mutex
++ batch *batch
++
+ rwlock sync.Mutex // Allows only one writer at a time.
+ metalock sync.Mutex // Protects meta page access.
+ mmaplock sync.RWMutex // Protects mmap access during remapping.
+@@ -74,6 +104,10 @@ type DB struct {
+ ops struct {
+ writeAt func(b []byte, off int64) (n int, err error)
+ }
++
++ // Read only mode.
++ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
++ readOnly bool
+ }
+
+ // Path returns the path to currently open database file.
+@@ -101,20 +135,34 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+ if options == nil {
+ options = DefaultOptions
+ }
++ db.NoGrowSync = options.NoGrowSync
++
++ // Set default values for later DB operations.
++ db.MaxBatchSize = DefaultMaxBatchSize
++ db.MaxBatchDelay = DefaultMaxBatchDelay
++
++ flag := os.O_RDWR
++ if options.ReadOnly {
++ flag = os.O_RDONLY
++ db.readOnly = true
++ }
+
+ // Open data file and separate sync handler for metadata writes.
+ db.path = path
+-
+ var err error
+- if db.file, err = os.OpenFile(db.path, os.O_RDWR|os.O_CREATE, mode); err != nil {
++ if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+
+- // Lock file so that other processes using Bolt cannot use the database
+- // at the same time. This would cause corruption since the two processes
+- // would write meta pages and free pages separately.
+- if err := flock(db.file, options.Timeout); err != nil {
++ // Lock file so that other processes using Bolt in read-write mode cannot
++ // use the database at the same time. This would cause corruption since
++ // the two processes would write meta pages and free pages separately.
++ // The database file is locked exclusively (only one process can grab the lock)
++ // if !options.ReadOnly.
++ // The database file is locked using the shared lock (more than one process may
++ // hold a lock at the same time) otherwise (options.ReadOnly is set).
++ if err := flock(db.file, !db.readOnly, options.Timeout); err != nil {
+ _ = db.close()
+ return nil, err
+ }
+@@ -162,16 +210,6 @@ func (db *DB) mmap(minsz int) error {
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+- // Dereference all mmap references before unmapping.
+- if db.rwtx != nil {
+- db.rwtx.root.dereference()
+- }
+-
+- // Unmap existing data before continuing.
+- if err := db.munmap(); err != nil {
+- return err
+- }
+-
+ info, err := db.file.Stat()
+ if err != nil {
+ return fmt.Errorf("mmap stat error: %s", err)
+@@ -184,7 +222,20 @@ func (db *DB) mmap(minsz int) error {
+ if size < minsz {
+ size = minsz
+ }
+- size = db.mmapSize(size)
++ size, err = db.mmapSize(size)
++ if err != nil {
++ return err
++ }
++
++ // Dereference all mmap references before unmapping.
++ if db.rwtx != nil {
++ db.rwtx.root.dereference()
++ }
++
++ // Unmap existing data before continuing.
++ if err := db.munmap(); err != nil {
++ return err
++ }
+
+ // Memory-map the data file as a byte slice.
+ if err := mmap(db, size); err != nil {
+@@ -215,22 +266,40 @@ func (db *DB) munmap() error {
+ }
+
+ // mmapSize determines the appropriate size for the mmap given the current size
+-// of the database. The minimum size is 4MB and doubles until it reaches 1GB.
+-func (db *DB) mmapSize(size int) int {
+- if size <= minMmapSize {
+- return minMmapSize
+- } else if size < maxMmapStep {
+- size *= 2
+- } else {
+- size += maxMmapStep
++// of the database. The minimum size is 1MB and doubles until it reaches 1GB.
++// Returns an error if the new mmap size is greater than the max allowed.
++func (db *DB) mmapSize(size int) (int, error) {
++ // Double the size from 32KB until 1GB.
++ for i := uint(15); i <= 30; i++ {
++ if size <= 1<<i {
++ return 1 << i, nil
++ }
++ }
++
++ // Verify the requested size is not above the maximum allowed.
++ if size > maxMapSize {
++ return 0, fmt.Errorf("mmap too large")
++ }
++
++ // If larger than 1GB then grow by 1GB at a time.
++ sz := int64(size)
++ if remainder := sz % int64(maxMmapStep); remainder > 0 {
++ sz += int64(maxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+- if (size % db.pageSize) != 0 {
+- size = ((size / db.pageSize) + 1) * db.pageSize
++ // This should always be true since we're incrementing in MBs.
++ pageSize := int64(db.pageSize)
++ if (sz % pageSize) != 0 {
++ sz = ((sz / pageSize) + 1) * pageSize
++ }
++
++ // If we've exceeded the max size then only grow up to the max size.
++ if sz > maxMapSize {
++ sz = maxMapSize
+ }
+
+- return size
++ return int(sz), nil
+ }
+
+ // init creates a new database file and initializes its meta pages.
+@@ -250,7 +319,6 @@ func (db *DB) init() error {
+ m.magic = magic
+ m.version = version
+ m.pageSize = uint32(db.pageSize)
+- m.version = version
+ m.freelist = 2
+ m.root = bucket{root: 3}
+ m.pgid = 4
+@@ -283,8 +351,15 @@ func (db *DB) init() error {
+ // Close releases all database resources.
+ // All transactions must be closed before closing the database.
+ func (db *DB) Close() error {
++ db.rwlock.Lock()
++ defer db.rwlock.Unlock()
++
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
++
++ db.mmaplock.RLock()
++ defer db.mmaplock.RUnlock()
++
+ return db.close()
+ }
+
+@@ -304,8 +379,11 @@ func (db *DB) close() error {
+
+ // Close file handles.
+ if db.file != nil {
+- // Unlock the file.
+- _ = funlock(db.file)
++ // No need to unlock read-only file.
++ if !db.readOnly {
++ // Unlock the file.
++ _ = funlock(db.file)
++ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+@@ -323,6 +401,11 @@ func (db *DB) close() error {
+ // will cause the calls to block and be serialized until the current write
+ // transaction finishes.
+ //
++// Transactions should not be depedent on one another. Opening a read
++// transaction and a write transaction in the same goroutine can cause the
++// writer to deadlock because the database periodically needs to re-mmap itself
++// as it grows and it cannot do that while a read transaction is open.
++//
+ // IMPORTANT: You must close read-only transactions after you are finished or
+ // else the database will not reclaim old pages.
+ func (db *DB) Begin(writable bool) (*Tx, error) {
+@@ -371,6 +454,11 @@ func (db *DB) beginTx() (*Tx, error) {
+ }
+
+ func (db *DB) beginRWTx() (*Tx, error) {
++ // If the database was opened with Options.ReadOnly, return an error.
++ if db.readOnly {
++ return nil, ErrDatabaseReadOnly
++ }
++
+ // Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
+ db.rwlock.Lock()
+@@ -501,6 +589,12 @@ func (db *DB) View(fn func(*Tx) error) error {
+ return nil
+ }
+
++// Sync executes fdatasync() against the database file handle.
++//
++// This is not necessary under normal operation, however, if you use NoSync
++// then it allows you to force the database file to sync against the disk.
++func (db *DB) Sync() error { return fdatasync(db) }
++
+ // Stats retrieves ongoing performance stats for the database.
+ // This is only updated when a transaction closes.
+ func (db *DB) Stats() Stats {
+@@ -561,18 +655,30 @@ func (db *DB) allocate(count int) (*page, error) {
+ return p, nil
+ }
+
++func (db *DB) IsReadOnly() bool {
++ return db.readOnly
++}
++
+ // Options represents the options that can be set when opening a database.
+ type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely. This option is only
+ // available on Darwin and Linux.
+ Timeout time.Duration
++
++ // Sets the DB.NoGrowSync flag before memory mapping the file.
++ NoGrowSync bool
++
++ // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
++ // grab a shared lock (UNIX).
++ ReadOnly bool
+ }
+
+ // DefaultOptions represent the options used if nil options are passed into Open().
+ // No timeout is used which will cause Bolt to wait indefinitely for a lock.
+ var DefaultOptions = &Options{
+- Timeout: 0,
++ Timeout: 0,
++ NoGrowSync: false,
+ }
+
+ // Stats represents statistics about the database.
+@@ -647,9 +753,11 @@ func (m *meta) copy(dest *meta) {
+
+ // write writes the meta onto a page.
+ func (m *meta) write(p *page) {
+-
+- _assert(m.root.root < m.pgid, "root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)
+- _assert(m.freelist < m.pgid, "freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)
++ if m.root.root >= m.pgid {
++ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
++ } else if m.freelist >= m.pgid {
++ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
++ }
+
+ // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ p.id = pgid(m.txid % 2)
+@@ -675,13 +783,8 @@ func _assert(condition bool, msg string, v ...interface{}) {
+ }
+ }
+
+-func warn(v ...interface{}) {
+- fmt.Fprintln(os.Stderr, v...)
+-}
+-
+-func warnf(msg string, v ...interface{}) {
+- fmt.Fprintf(os.Stderr, msg+"\n", v...)
+-}
++func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) }
++func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
+
+ func printstack() {
+ stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
+diff --git a/vendor/src/github.com/boltdb/bolt/errors.go b/vendor/src/github.com/boltdb/bolt/errors.go
+index aa504f1..6883786 100644
+--- a/vendor/src/github.com/boltdb/bolt/errors.go
++++ b/vendor/src/github.com/boltdb/bolt/errors.go
+@@ -36,6 +36,10 @@ var (
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ ErrTxClosed = errors.New("tx closed")
++
++ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
++ // read-only database.
++ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+ )
+
+ // These errors can occur when putting or deleting a value or a bucket.
+diff --git a/vendor/src/github.com/boltdb/bolt/freelist.go b/vendor/src/github.com/boltdb/bolt/freelist.go
+index 150e3e6..0161948 100644
+--- a/vendor/src/github.com/boltdb/bolt/freelist.go
++++ b/vendor/src/github.com/boltdb/bolt/freelist.go
+@@ -1,6 +1,7 @@
+ package bolt
+
+ import (
++ "fmt"
+ "sort"
+ "unsafe"
+ )
+@@ -47,15 +48,14 @@ func (f *freelist) pending_count() int {
+
+ // all returns a list of all free ids and all pending ids in one sorted list.
+ func (f *freelist) all() []pgid {
+- ids := make([]pgid, len(f.ids))
+- copy(ids, f.ids)
++ m := make(pgids, 0)
+
+ for _, list := range f.pending {
+- ids = append(ids, list...)
++ m = append(m, list...)
+ }
+
+- sort.Sort(pgids(ids))
+- return ids
++ sort.Sort(m)
++ return pgids(f.ids).merge(m)
+ }
+
+ // allocate returns the starting page id of a contiguous list of pages of a given size.
+@@ -67,7 +67,9 @@ func (f *freelist) allocate(n int) pgid {
+
+ var initial, previd pgid
+ for i, id := range f.ids {
+- _assert(id > 1, "invalid page allocation: %d", id)
++ if id <= 1 {
++ panic(fmt.Sprintf("invalid page allocation: %d", id))
++ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+@@ -103,13 +105,17 @@ func (f *freelist) allocate(n int) pgid {
+ // free releases a page and its overflow for a given transaction id.
+ // If the page is already free then a panic will occur.
+ func (f *freelist) free(txid txid, p *page) {
+- _assert(p.id > 1, "cannot free page 0 or 1: %d", p.id)
++ if p.id <= 1 {
++ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
++ }
+
+ // Free page and all its overflow pages.
+ var ids = f.pending[txid]
+ for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+ // Verify that page is not already free.
+- _assert(!f.cache[id], "page %d already freed", id)
++ if f.cache[id] {
++ panic(fmt.Sprintf("page %d already freed", id))
++ }
+
+ // Add to the freelist and cache.
+ ids = append(ids, id)
+@@ -120,15 +126,17 @@ func (f *freelist) free(txid txid, p *page) {
+
+ // release moves all page ids for a transaction id (or older) to the freelist.
+ func (f *freelist) release(txid txid) {
++ m := make(pgids, 0)
+ for tid, ids := range f.pending {
+ if tid <= txid {
+ // Move transaction's pending pages to the available freelist.
+ // Don't remove from the cache since the page is still free.
+- f.ids = append(f.ids, ids...)
++ m = append(m, ids...)
+ delete(f.pending, tid)
+ }
+ }
+- sort.Sort(pgids(f.ids))
++ sort.Sort(m)
++ f.ids = pgids(f.ids).merge(m)
+ }
+
+ // rollback removes the pages from a given pending tx.
+diff --git a/vendor/src/github.com/boltdb/bolt/node.go b/vendor/src/github.com/boltdb/bolt/node.go
+index c204c39..c9fb21c 100644
+--- a/vendor/src/github.com/boltdb/bolt/node.go
++++ b/vendor/src/github.com/boltdb/bolt/node.go
+@@ -2,6 +2,7 @@ package bolt
+
+ import (
+ "bytes"
++ "fmt"
+ "sort"
+ "unsafe"
+ )
+@@ -70,7 +71,9 @@ func (n *node) pageElementSize() int {
+
+ // childAt returns the child node at a given index.
+ func (n *node) childAt(index int) *node {
+- _assert(!n.isLeaf, "invalid childAt(%d) on a leaf node", index)
++ if n.isLeaf {
++ panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
++ }
+ return n.bucket.node(n.inodes[index].pgid, n)
+ }
+
+@@ -111,9 +114,13 @@ func (n *node) prevSibling() *node {
+
+ // put inserts a key/value.
+ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
+- _assert(pgid < n.bucket.tx.meta.pgid, "pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)
+- _assert(len(oldKey) > 0, "put: zero-length old key")
+- _assert(len(newKey) > 0, "put: zero-length new key")
++ if pgid >= n.bucket.tx.meta.pgid {
++ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
++ } else if len(oldKey) <= 0 {
++ panic("put: zero-length old key")
++ } else if len(newKey) <= 0 {
++ panic("put: zero-length new key")
++ }
+
+ // Find insertion index.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+@@ -189,7 +196,9 @@ func (n *node) write(p *page) {
+ p.flags |= branchPageFlag
+ }
+
+- _assert(len(n.inodes) < 0xFFFF, "inode overflow: %d (pgid=%d)", len(n.inodes), p.id)
++ if len(n.inodes) >= 0xFFFF {
++ panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
++ }
+ p.count = uint16(len(n.inodes))
+
+ // Loop over each item and write it to the page.
+@@ -212,11 +221,20 @@ func (n *node) write(p *page) {
+ _assert(elem.pgid != p.id, "write: circular dependency occurred")
+ }
+
++ // If the length of key+value is larger than the max allocation size
++ // then we need to reallocate the byte array pointer.
++ //
++ // See: https://github.com/boltdb/bolt/pull/335
++ klen, vlen := len(item.key), len(item.value)
++ if len(b) < klen+vlen {
++ b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
++ }
++
+ // Write data for the element to the end of the page.
+ copy(b[0:], item.key)
+- b = b[len(item.key):]
++ b = b[klen:]
+ copy(b[0:], item.value)
+- b = b[len(item.value):]
++ b = b[vlen:]
+ }
+
+ // DEBUG ONLY: n.dump()
+@@ -348,7 +366,9 @@ func (n *node) spill() error {
+ }
+
+ // Write the node.
+- _assert(p.id < tx.meta.pgid, "pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)
++ if p.id >= tx.meta.pgid {
++ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
++ }
+ node.pgid = p.id
+ node.write(p)
+ node.spilled = true
+diff --git a/vendor/src/github.com/boltdb/bolt/page.go b/vendor/src/github.com/boltdb/bolt/page.go
+index b3dc473..818aa1b 100644
+--- a/vendor/src/github.com/boltdb/bolt/page.go
++++ b/vendor/src/github.com/boltdb/bolt/page.go
+@@ -3,12 +3,12 @@ package bolt
+ import (
+ "fmt"
+ "os"
++ "sort"
+ "unsafe"
+ )
+
+ const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+
+-const maxAllocSize = 0xFFFFFFF
+ const minKeysPerPage = 2
+
+ const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
+@@ -97,7 +97,7 @@ type branchPageElement struct {
+ // key returns a byte slice of the node key.
+ func (n *branchPageElement) key() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+- return buf[n.pos : n.pos+n.ksize]
++ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+ }
+
+ // leafPageElement represents a node on a leaf page.
+@@ -111,13 +111,13 @@ type leafPageElement struct {
+ // key returns a byte slice of the node key.
+ func (n *leafPageElement) key() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+- return buf[n.pos : n.pos+n.ksize]
++ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+ }
+
+ // value returns a byte slice of the node value.
+ func (n *leafPageElement) value() []byte {
+ buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+- return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
++ return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize]
+ }
+
+ // PageInfo represents human readable information about a page.
+@@ -133,3 +133,40 @@ type pgids []pgid
+ func (s pgids) Len() int { return len(s) }
+ func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+ func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
++
++// merge returns the sorted union of a and b.
++func (a pgids) merge(b pgids) pgids {
++ // Return the opposite slice if one is nil.
++ if len(a) == 0 {
++ return b
++ } else if len(b) == 0 {
++ return a
++ }
++
++ // Create a list to hold all elements from both lists.
++ merged := make(pgids, 0, len(a)+len(b))
++
++ // Assign lead to the slice with a lower starting value, follow to the higher value.
++ lead, follow := a, b
++ if b[0] < a[0] {
++ lead, follow = b, a
++ }
++
++ // Continue while there are elements in the lead.
++ for len(lead) > 0 {
++ // Merge largest prefix of lead that is ahead of follow[0].
++ n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
++ merged = append(merged, lead[:n]...)
++ if n >= len(lead) {
++ break
++ }
++
++ // Swap lead and follow.
++ lead, follow = follow, lead[n:]
++ }
++
++ // Append what's left in follow.
++ merged = append(merged, follow...)
++
++ return merged
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/tx.go b/vendor/src/github.com/boltdb/bolt/tx.go
+index c041d73..fe6c287 100644
+--- a/vendor/src/github.com/boltdb/bolt/tx.go
++++ b/vendor/src/github.com/boltdb/bolt/tx.go
+@@ -87,18 +87,21 @@ func (tx *Tx) Stats() TxStats {
+
+ // Bucket retrieves a bucket by name.
+ // Returns nil if the bucket does not exist.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
+ }
+
+ // CreateBucket creates a new bucket.
+ // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucket(name)
+ }
+
+ // CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+ // Returns an error if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucketIfNotExists(name)
+ }
+@@ -127,7 +130,8 @@ func (tx *Tx) OnCommit(fn func()) {
+ }
+
+ // Commit writes all changes to disk and updates the meta page.
+-// Returns an error if a disk write error occurs.
++// Returns an error if a disk write error occurs, or if Commit is
++// called on a read-only transaction.
+ func (tx *Tx) Commit() error {
+ _assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
+@@ -203,7 +207,8 @@ func (tx *Tx) Commit() error {
+ return nil
+ }
+
+-// Rollback closes the transaction and ignores all previous updates.
++// Rollback closes the transaction and ignores all previous updates. Read-only
++// transactions must be rolled back and not committed.
+ func (tx *Tx) Rollback() error {
+ _assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
+@@ -234,7 +239,8 @@ func (tx *Tx) close() {
+ var freelistPendingN = tx.db.freelist.pending_count()
+ var freelistAlloc = tx.db.freelist.size()
+
+- // Remove writer lock.
++ // Remove transaction ref & writer lock.
++ tx.db.rwtx = nil
+ tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+@@ -248,41 +254,51 @@ func (tx *Tx) close() {
+ } else {
+ tx.db.removeTx(tx)
+ }
++
++ // Clear all references.
+ tx.db = nil
++ tx.meta = nil
++ tx.root = Bucket{tx: tx}
++ tx.pages = nil
+ }
+
+ // Copy writes the entire database to a writer.
+-// A reader transaction is maintained during the copy so it is safe to continue
+-// using the database while a copy is in progress.
+-// Copy will write exactly tx.Size() bytes into the writer.
++// This function exists for backwards compatibility. Use WriteTo() in
+ func (tx *Tx) Copy(w io.Writer) error {
+- var f *os.File
+- var err error
++ _, err := tx.WriteTo(w)
++ return err
++}
+
++// WriteTo writes the entire database to a writer.
++// If err == nil then exactly tx.Size() bytes will be written into the writer.
++func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+ // Attempt to open reader directly.
++ var f *os.File
+ if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil {
+ // Fallback to a regular open if that doesn't work.
+ if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
+- return err
++ return 0, err
+ }
+ }
+
+ // Copy the meta pages.
+ tx.db.metalock.Lock()
+- _, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
++ n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
+ tx.db.metalock.Unlock()
+ if err != nil {
+ _ = f.Close()
+- return fmt.Errorf("meta copy: %s", err)
++ return n, fmt.Errorf("meta copy: %s", err)
+ }
+
+ // Copy data pages.
+- if _, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)); err != nil {
++ wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
++ n += wn
++ if err != nil {
+ _ = f.Close()
+- return err
++ return n, err
+ }
+
+- return f.Close()
++ return n, f.Close()
+ }
+
+ // CopyFile copies the entire database to file at the given path.
+@@ -416,15 +432,39 @@ func (tx *Tx) write() error {
+ // Write pages to disk in order.
+ for _, p := range pages {
+ size := (int(p.overflow) + 1) * tx.db.pageSize
+- buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:size]
+ offset := int64(p.id) * int64(tx.db.pageSize)
+- if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+- return err
+- }
+
+- // Update statistics.
+- tx.stats.Write++
++ // Write out page in "max allocation" sized chunks.
++ ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
++ for {
++ // Limit our write to our max allocation size.
++ sz := size
++ if sz > maxAllocSize-1 {
++ sz = maxAllocSize - 1
++ }
++
++ // Write chunk to disk.
++ buf := ptr[:sz]
++ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
++ return err
++ }
++
++ // Update statistics.
++ tx.stats.Write++
++
++ // Exit inner for loop if we've written all the chunks.
++ size -= sz
++ if size == 0 {
++ break
++ }
++
++ // Otherwise move offset forward and move pointer to next chunk.
++ offset += int64(sz)
++ ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
++ }
+ }
++
++ // Ignore file sync if flag is set on DB.
+ if !tx.db.NoSync || IgnoreNoSync {
+ if err := fdatasync(tx.db); err != nil {
+ return err
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch
new file mode 100644
index 0000000..d37d7a0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch
@@ -0,0 +1,56 @@
+From 12fd6388a033ab5ec9b3a7b144c4976031e6aa52 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Fri, 20 Nov 2015 10:02:09 +0000
+Subject: [PATCH] disable sha1sum startup
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+---
+ utils/utils.go | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/utils/utils.go b/utils/utils.go
+index a17ab9d..3fc514a 100644
+--- a/utils/utils.go
++++ b/utils/utils.go
+@@ -2,8 +2,6 @@ package utils
+
+ import (
+ "bufio"
+- "crypto/sha1"
+- "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+@@ -42,20 +40,6 @@ func SelfPath() string {
+ return path
+ }
+
+-func dockerInitSha1(target string) string {
+- f, err := os.Open(target)
+- if err != nil {
+- return ""
+- }
+- defer f.Close()
+- h := sha1.New()
+- _, err = io.Copy(h, f)
+- if err != nil {
+- return ""
+- }
+- return hex.EncodeToString(h.Sum(nil))
+-}
+-
+ func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
+ if target == "" {
+ return false
+@@ -77,7 +61,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and
+ }
+ return os.SameFile(targetFileInfo, selfPathFileInfo)
+ }
+- return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1
++ return true
+ }
+
+ // DockerInitPath figures out the path of our dockerinit (which may be SelfPath())
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
new file mode 100644
index 0000000..9c01c75
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
@@ -0,0 +1,126 @@
+#!/bin/sh
+#
+# /etc/rc.d/init.d/docker
+#
+# Daemon for docker.com
+#
+# chkconfig: 2345 95 95
+# description: Daemon for docker.com
+
+### BEGIN INIT INFO
+# Provides: docker
+# Required-Start: $network cgconfig
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: start and stop docker
+# Description: Daemon for docker.com
+### END INIT INFO
+
+# Source function library.
+. /etc/init.d/functions
+
+prog="docker"
+unshare=/usr/bin/unshare
+exec="/usr/bin/$prog"
+pidfile="/var/run/$prog.pid"
+lockfile="/var/lock/subsys/$prog"
+logfile="/var/log/$prog"
+
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+start() {
+ [ -x $exec ] || exit 5
+
+ check_for_cleanup
+
+ if ! [ -f $pidfile ]; then
+ printf "Starting $prog:\t"
+ echo "\n$(date)\n" >> $logfile
+ "$unshare" -m -- $exec -d $other_args &>> $logfile &
+ pid=$!
+ touch $lockfile
+ # wait up to 10 seconds for the pidfile to exist. see
+ # https://github.com/docker/docker/issues/5359
+ tries=0
+ while [ ! -f $pidfile -a $tries -lt 10 ]; do
+ sleep 1
+ tries=$((tries + 1))
+ done
+ success
+ echo
+ else
+ failure
+ echo
+ printf "$pidfile still exists...\n"
+ exit 7
+ fi
+}
+
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc $prog
+ retval=$?
+ echo
+ [ $retval -eq 0 ] && rm -f $lockfile
+ return $retval
+}
+
+restart() {
+ stop
+ start
+}
+
+reload() {
+ restart
+}
+
+force_reload() {
+ restart
+}
+
+rh_status() {
+ status -p $pidfile $prog
+}
+
+rh_status_q() {
+ rh_status >/dev/null 2>&1
+}
+
+
+check_for_cleanup() {
+ if [ -f ${pidfile} ]; then
+ /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile}
+ fi
+}
+
+case "$1" in
+ start)
+ $1
+ ;;
+ stop)
+ $1
+ ;;
+ restart)
+ $1
+ ;;
+ reload)
+ $1
+ ;;
+ force-reload)
+ force_reload
+ ;;
+ status)
+ status
+ ;;
+ condrestart|try-restart)
+ restart
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+ exit 2
+esac
+
+exit $?
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
new file mode 100644
index 0000000..6801031
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=http://docs.docker.com
+After=network.target docker.socket
+Requires=docker.socket
+
+[Service]
+ExecStart=/usr/bin/docker -d -H fd:// --registry-mirror=http://localhost:5000 --insecure-registry=http://localhost:5000
+MountFlags=slave
+LimitNOFILE=1048576
+LimitNPROC=1048576
+LimitCORE=infinity
+
+[Install]
+WantedBy=multi-user.target
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/hi.Dockerfile b/import-layers/meta-virtualization/recipes-containers/docker/files/hi.Dockerfile
new file mode 100644
index 0000000..9af6805
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/hi.Dockerfile
@@ -0,0 +1,7 @@
+FROM debian
+
+MAINTAINER amy.fong@windriver.com
+
+RUN apt-get update && apt-get install figlet
+
+ENTRYPOINT [ "/usr/bin/figlet", "hi" ]
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch
new file mode 100644
index 0000000..5adb730
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch
@@ -0,0 +1,121 @@
+From e08f3573b3561f1f0490624f7ca95b7ccd8157cb Mon Sep 17 00:00:00 2001
+Message-Id: <e08f3573b3561f1f0490624f7ca95b7ccd8157cb.1435177418.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Wed, 24 Jun 2015 16:16:38 -0400
+Subject: [PATCH 1/1] Generate lxc-restore-net properly
+
+It's a script that should be run through the configure
+mechanism the same as the others. We simply rename it
+to have a .in extension and add it to configure.ac .
+
+Also, by generating the script from a .in file, it gets
+placed into the build directory. This plays nice with
+build systems that keep the src separate from the build
+directory. Without this change, the install step won't
+find the lxc-restore-net script as it still just resides
+in the src directory and not in the build directory.
+
+Upstream-Status: Not applicable. This script has already
+been rearchitected out of existence by
+cba98d127bf490b018a016b792ae05fd2d29c5ee:
+"c/r: use criu option instead of lxc-restore-net
+
+As of criu 1.5, the --veth-pair argument supports an additional parameter that
+is the bridge name to attach to. This enables us to get rid of the goofy
+action-script hack that passed bridge names as environment variables.
+
+This patch is on top of the systemd/lxcfs mount rework patch, as we probably
+want to wait to use 1.5 options until it has been out for a while and is in
+distros.
+
+Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
+Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>"
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ configure.ac | 1 +
+ src/lxc/lxc-restore-net | 26 --------------------------
+ src/lxc/lxc-restore-net.in | 26 ++++++++++++++++++++++++++
+ 3 files changed, 27 insertions(+), 26 deletions(-)
+ delete mode 100755 src/lxc/lxc-restore-net
+ create mode 100755 src/lxc/lxc-restore-net.in
+
+diff --git a/configure.ac b/configure.ac
+index 574b2cd..4972803 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -768,6 +768,7 @@ AC_CONFIG_FILES([
+ src/lxc/legacy/lxc-ls
+ src/lxc/lxc.functions
+ src/lxc/version.h
++ src/lxc/lxc-restore-net
+ src/python-lxc/Makefile
+ src/python-lxc/setup.py
+
+diff --git a/src/lxc/lxc-restore-net b/src/lxc/lxc-restore-net
+deleted file mode 100755
+index 6ae3c19..0000000
+--- a/src/lxc/lxc-restore-net
++++ /dev/null
+@@ -1,26 +0,0 @@
+-#!/bin/sh
+-
+-set -e
+-
+-i=0
+-while true; do
+- eval "bridge=\$LXC_CRIU_BRIDGE$i"
+- eval "veth=\$LXC_CRIU_VETH$i"
+-
+- if [ -z "$bridge" ] || [ -z "$veth" ]; then
+- exit 0
+- fi
+-
+- if [ "$CRTOOLS_SCRIPT_ACTION" = "network-lock" ]; then
+- brctl delif $bridge $veth
+- fi
+-
+- if [ "$CRTOOLS_SCRIPT_ACTION" = "network-unlock" ]; then
+- brctl addif $bridge $veth
+- ip link set dev $veth up
+- fi
+-
+- i=$((i+1))
+-done
+-
+-exit 1
+diff --git a/src/lxc/lxc-restore-net.in b/src/lxc/lxc-restore-net.in
+new file mode 100755
+index 0000000..6ae3c19
+--- /dev/null
++++ b/src/lxc/lxc-restore-net.in
+@@ -0,0 +1,26 @@
++#!/bin/sh
++
++set -e
++
++i=0
++while true; do
++ eval "bridge=\$LXC_CRIU_BRIDGE$i"
++ eval "veth=\$LXC_CRIU_VETH$i"
++
++ if [ -z "$bridge" ] || [ -z "$veth" ]; then
++ exit 0
++ fi
++
++ if [ "$CRTOOLS_SCRIPT_ACTION" = "network-lock" ]; then
++ brctl delif $bridge $veth
++ fi
++
++ if [ "$CRTOOLS_SCRIPT_ACTION" = "network-unlock" ]; then
++ brctl addif $bridge $veth
++ ip link set dev $veth up
++ fi
++
++ i=$((i+1))
++done
++
++exit 1
+--
+1.8.3.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
new file mode 100644
index 0000000..2b5c853
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
@@ -0,0 +1,26 @@
+From fe23085d9a40d6d78387d9ce8ddb65785fe8d6e5 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Thu, 2 Oct 2014 18:31:50 -0400
+Subject: [PATCH] automake: ensure VPATH builds correctly
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+---
+ src/tests/Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/tests/Makefile.am b/src/tests/Makefile.am
+index d74c10d..6225f78 100644
+--- a/src/tests/Makefile.am
++++ b/src/tests/Makefile.am
+@@ -66,7 +66,7 @@ buildtest-TESTS: $(TESTS)
+ install-ptest:
+ install -d $(TEST_DIR)
+ install -D ../lxc/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
+- install -D ../../config/test-driver $(TEST_DIR)/../../config/test-driver
++ install -D $(top_srcdir)/config/test-driver $(TEST_DIR)/../../config/test-driver
+ cp Makefile $(TEST_DIR)
+ @(for file in $(TESTS); do install $$file $(TEST_DIR); done;)
+ sed -i 's|^Makefile:|_Makefile:|' $(TEST_DIR)/Makefile
+--
+1.7.10.4
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/logs-optionally-use-base-filenames-to-report-src-fil.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/logs-optionally-use-base-filenames-to-report-src-fil.patch
new file mode 100644
index 0000000..583b6f1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/logs-optionally-use-base-filenames-to-report-src-fil.patch
@@ -0,0 +1,70 @@
+From 4729d0f4c4d1dacd150ddfd7061dda875eb94e34 Mon Sep 17 00:00:00 2001
+Message-Id: <4729d0f4c4d1dacd150ddfd7061dda875eb94e34.1443216870.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Fri, 25 Sep 2015 15:08:17 -0400
+Subject: [PATCH 1/1] logs: optionally use base filenames to report src files
+
+Problem: Logs are nice in that they report the source file,
+routine, and line number where an issue occurs. But the
+file is printed as the absolute filename. Users do not
+need to see a long spew of path directory names where the package
+was built. It just confuses things.
+
+Solution: Optionally chop off all leading directories so that just
+the source filename ie. basename is printed. This is done by
+setting a #ifdef LXC_LOG_USE_BASENAME check in the code. That
+define is done via the optional --enable-log-src-basename provided
+at configure time.
+
+Using __BASE_FILE__ instead of __FILE__ did not work. It
+refers to the file name as presented to the compile
+machinery, and that may still be the absolute pathname to
+the file.
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ configure.ac | 9 +++++++++
+ src/lxc/log.h | 5 +++++
+ 2 files changed, 14 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index deba90b..c1ed67b 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -357,6 +357,15 @@ AM_COND_IF([ENABLE_PYTHON],
+ PKG_CHECK_MODULES([PYTHONDEV], [python3 >= 3.2],[],[AC_MSG_ERROR([You must install python3-dev])])
+ AC_DEFINE_UNQUOTED([ENABLE_PYTHON], 1, [Python3 is available])])
+
++# Enable basenames in the logs for source files
++AC_ARG_ENABLE([log-src-basename],
++ [AC_HELP_STRING([--enable-log-src-basename], [Use the shorter source file basename in the logs [default=no]])],
++ [], [enable_log_src_basename=no])
++
++if test "x$enable_log_src_basename" = "xyes"; then
++ AC_DEFINE([LXC_LOG_USE_BASENAME], 1, [Enabling shorter src filenames in the logs])
++fi
++
+ # Enable dumping stack traces
+ AC_ARG_ENABLE([mutex-debugging],
+ [AC_HELP_STRING([--enable-mutex-debugging], [Makes mutexes to report error and provide stack trace [default=no]])],
+diff --git a/src/lxc/log.h b/src/lxc/log.h
+index 76bd4df..4365977 100644
+--- a/src/lxc/log.h
++++ b/src/lxc/log.h
+@@ -74,8 +74,13 @@ struct lxc_log_locinfo {
+ int line;
+ };
+
++#ifdef LXC_LOG_USE_BASENAME
++#define LXC_LOG_LOCINFO_INIT \
++ { .file = (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__), .func = __func__, .line = __LINE__ }
++#else
+ #define LXC_LOG_LOCINFO_INIT \
+ { .file = __FILE__, .func = __func__, .line = __LINE__ }
++#endif
+
+ /* brief logging event object */
+ struct lxc_log_event {
+--
+1.8.3.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-1.0.0-disable-udhcp-from-busybox-template.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-1.0.0-disable-udhcp-from-busybox-template.patch
new file mode 100644
index 0000000..723be27
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-1.0.0-disable-udhcp-from-busybox-template.patch
@@ -0,0 +1,24 @@
+From d7e07e7acb1cbad33806f49143a2a30b4468c369 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Mon, 8 Apr 2013 18:30:19 +0300
+Subject: [PATCH] lxc-0.9.0-disable-udhcp-from-busybox-template
+
+---
+ templates/lxc-busybox.in | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/templates/lxc-busybox.in b/templates/lxc-busybox.in
+index cb425ec..bb8c951 100644
+--- a/templates/lxc-busybox.in
++++ b/templates/lxc-busybox.in
+@@ -84,7 +84,6 @@ EOF
+ #!/bin/sh
+ /bin/syslogd
+ /bin/mount -a
+-/bin/udhcpc
+ EOF
+
+ # executable
+--
+1.7.11.7
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch
new file mode 100644
index 0000000..5f9d771
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch
@@ -0,0 +1,32 @@
+From 4cf207ffd64c6f815e62ecbbf25b5a378e707182 Mon Sep 17 00:00:00 2001
+Message-Id: <4cf207ffd64c6f815e62ecbbf25b5a378e707182.1439319694.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Tue, 11 Aug 2015 14:05:00 -0400
+Subject: [PATCH 1/1] lxc: doc: upgrade to use docbook 3.1 DTD
+
+docbook2man fails to build the man pages in poky
+due to missing the ancient Davenport 3.0 DTD.
+Poky meta has the Oasis 3.1 version so upgrade
+to use that instead.
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index 4972803..2e67b5e 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -179,7 +179,7 @@ AM_CONDITIONAL([ENABLE_DOCBOOK], [test "x$db2xman" != "x"])
+ AM_CONDITIONAL([USE_DOCBOOK2X], [test "x$db2xman" != "xdocbook2man"])
+
+ if test "x$db2xman" = "xdocbook2man"; then
+- docdtd="\"-//Davenport//DTD DocBook V3.0//EN\""
++ docdtd="\"-//OASIS//DTD DocBook V3.1//EN\""
+ else
+ docdtd="\"-//OASIS//DTD DocBook XML\" \"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd\""
+ fi
+--
+1.8.3.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-fix-B-S.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-fix-B-S.patch
new file mode 100644
index 0000000..a776b4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-fix-B-S.patch
@@ -0,0 +1,16 @@
+Index: lxc-2.0.0/config/init/upstart/Makefile.am
+===================================================================
+--- lxc-2.0.0.orig/config/init/upstart/Makefile.am
++++ lxc-2.0.0/config/init/upstart/Makefile.am
+@@ -3,9 +3,9 @@
+ if INIT_SCRIPT_UPSTART
+ install-upstart: lxc.conf lxc-instance.conf lxc-net.conf
+ $(MKDIR_P) $(DESTDIR)$(sysconfdir)/init/
+- $(INSTALL_DATA) lxc.conf $(DESTDIR)$(sysconfdir)/init/
++ $(INSTALL_DATA) $(srcdir)/lxc.conf $(DESTDIR)$(sysconfdir)/init/
+ $(INSTALL_DATA) $(srcdir)/lxc-instance.conf $(DESTDIR)$(sysconfdir)/init/
+- $(INSTALL_DATA) lxc-net.conf $(DESTDIR)$(sysconfdir)/init/
++ $(INSTALL_DATA) $(srcdir)/lxc-net.conf $(DESTDIR)$(sysconfdir)/init/
+
+ uninstall-upstart:
+ rm -f $(DESTDIR)$(sysconfdir)/init/lxc.conf
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/run-ptest b/import-layers/meta-virtualization/recipes-containers/lxc/files/run-ptest
new file mode 100644
index 0000000..23a6256
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/run-ptest
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+make -C src/tests -k check-TESTS
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
new file mode 100644
index 0000000..e4e034b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
@@ -0,0 +1,32 @@
+Add install-ptest rule.
+
+Signed-off-by: Mihaela Sendrea <mihaela.sendrea@enea.com>
+Upstream-Status: Pending
+
+diff -uNr a/src/tests/Makefile.am b/src/tests/Makefile.am
+--- a/src/tests/Makefile.am 2014-04-07 16:25:59.246238815 +0300
++++ b/src/tests/Makefile.am 2014-04-10 18:09:43.195772467 +0300
+@@ -54,6 +54,23 @@
+
+ endif
+
++TESTS = lxc-test-containertests lxc-test-locktests \
++ lxc-test-getkeys lxc-test-lxcpath lxc-test-cgpath lxc-test-console \
++ lxc-test-snapshot lxc-test-concurrent lxc-test-may-control \
++ lxc-test-reboot lxc-test-list lxc-test-attach lxc-test-device-add-remove
++
++buildtest-TESTS: $(TESTS)
++
++install-ptest:
++ install -d $(TEST_DIR)
++ install -D ../lxc/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
++ install -D ../../config/test-driver $(TEST_DIR)/../../config/test-driver
++ cp Makefile $(TEST_DIR)
++ @(for file in $(TESTS); do install $$file $(TEST_DIR); done;)
++ sed -i 's|^Makefile:|_Makefile:|' $(TEST_DIR)/Makefile
++ sed -i 's|^all-am:|_all-am:|' $(TEST_DIR)/Makefile
++ sed -i -e 's|^\(.*\.log:\) \(.*EXEEXT.*\)|\1|g' $(TEST_DIR)/Makefile
++
+ EXTRA_DIST = \
+ cgpath.c \
+ clonetest.c \
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
new file mode 100644
index 0000000..34aab38
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
@@ -0,0 +1,168 @@
+DESCRIPTION = "lxc aims to use these new functionalities to provide a userspace container object"
+SECTION = "console/utils"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
+PRIORITY = "optional"
+DEPENDS = "libxml2 libcap"
+RDEPENDS_${PN} = " \
+ rsync \
+ gzip \
+ libcap-bin \
+ bridge-utils \
+ dnsmasq \
+ perl-module-strict \
+ perl-module-getopt-long \
+ perl-module-vars \
+ perl-module-warnings-register \
+ perl-module-exporter \
+ perl-module-constant \
+ perl-module-overload \
+ perl-module-exporter-heavy \
+"
+RDEPENDS_${PN}-ptest += "file make"
+
+SRC_URI = "http://linuxcontainers.org/downloads/${BPN}-${PV}.tar.gz \
+ file://lxc-1.0.0-disable-udhcp-from-busybox-template.patch \
+ file://runtest.patch \
+ file://run-ptest \
+ file://automake-ensure-VPATH-builds-correctly.patch \
+ file://lxc-fix-B-S.patch \
+ file://lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch \
+ file://logs-optionally-use-base-filenames-to-report-src-fil.patch \
+ "
+
+SRC_URI[md5sum] = "04a7245a614cd3296b0ae9ceeeb83fbb"
+SRC_URI[sha256sum] = "5b737e114d8ef1feb193fba936d77a5697a7c8a10199a068cdd90d1bd27c10e4"
+
+S = "${WORKDIR}/${BPN}-${PV}"
+
+# Let's not configure for the host distro.
+#
+PTEST_CONF = "${@base_contains('DISTRO_FEATURES', 'ptest', '--enable-tests', '', d)}"
+EXTRA_OECONF += "--with-distro=${DISTRO} ${PTEST_CONF}"
+
+EXTRA_OECONF += "--with-init-script=\
+${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'sysvinit,', '', d)}\
+${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'systemd', '', d)}"
+
+EXTRA_OECONF += "--enable-log-src-basename"
+
+PACKAGECONFIG ??= "templates \
+ ${@base_contains('DISTRO_FEATURES', 'selinux', 'selinux', '', d)} \
+"
+PACKAGECONFIG[doc] = "--enable-doc --enable-api-docs,--disable-doc --disable-api-docs,,"
+PACKAGECONFIG[rpath] = "--enable-rpath,--disable-rpath,,"
+PACKAGECONFIG[apparmour] = "--enable-apparmor,--disable-apparmor,apparmor,apparmor"
+PACKAGECONFIG[templates] = ",,, ${PN}-templates"
+PACKAGECONFIG[selinux] = "--enable-selinux,--disable-selinux,libselinux,libselinux"
+PACKAGECONFIG[seccomp] ="--enable-seccomp,--disable-seccomp,libseccomp,libseccomp"
+PACKAGECONFIG[python] = "--enable-python,--disable-python,python3,python3-core"
+
+# required by python3 to run setup.py
+export BUILD_SYS
+export HOST_SYS
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+inherit autotools pkgconfig ptest update-rc.d systemd python3native
+
+SYSTEMD_PACKAGES = "${PN}-setup"
+SYSTEMD_SERVICE_${PN}-setup = "lxc.service"
+SYSTEMD_AUTO_ENABLE_${PN}-setup = "disable"
+
+INITSCRIPT_PACKAGES = "${PN}-setup"
+INITSCRIPT_NAME_{PN}-setup = "lxc"
+INITSCRIPT_PARAMS_${PN}-setup = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+
+FILES_${PN}-doc = "${mandir} ${infodir}"
+# For LXC the docdir only contains example configuration files and should be included in the lxc package
+FILES_${PN} += "${docdir}"
+FILES_${PN} += "${libdir}/python3*"
+FILES_${PN}-dbg += "${libexecdir}/lxc/.debug"
+PACKAGES =+ "${PN}-templates ${PN}-setup ${PN}-networking"
+FILES_${PN}-templates += "${datadir}/lxc/templates"
+RDEPENDS_${PN}-templates += "bash"
+
+ALLOW_EMPTY_${PN}-networking = "1"
+
+FILES_${PN}-setup += "/etc/tmpfiles.d"
+FILES_${PN}-setup += "/lib/systemd/system"
+FILES_${PN}-setup += "/usr/lib/systemd/system"
+FILES_${PN}-setup += "/etc/init.d"
+
+PRIVATE_LIBS_${PN}-ptest = "liblxc.so.1"
+
+CACHED_CONFIGUREVARS += " \
+ ac_cv_path_PYTHON='${STAGING_BINDIR_NATIVE}/python3-native/python3' \
+ am_cv_python_pyexecdir='${exec_prefix}/${libdir}/python3.5/site-packages' \
+ am_cv_python_pythondir='${prefix}/${libdir}/python3.5/site-packages' \
+"
+
+do_install_append() {
+ # The /var/cache/lxc directory created by the Makefile
+ # is wiped out in volatile, we need to create this at boot.
+ rm -rf ${D}${localstatedir}/cache
+ install -d ${D}${sysconfdir}/default/volatiles
+ echo "d root root 0755 ${localstatedir}/cache/lxc none" \
+ > ${D}${sysconfdir}/default/volatiles/99_lxc
+
+ for i in `grep -l "#! */bin/bash" ${D}${datadir}/lxc/hooks/*`; do \
+ sed -e 's|#! */bin/bash|#!/bin/sh|' -i $i; done
+
+ if ${@base_contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then
+ install -d ${D}${sysconfdir}/init.d
+ install -m 755 config/init/sysvinit/lxc* ${D}${sysconfdir}/init.d
+ fi
+
+ # since python3-native is used for install location this will not be
+ # suitable for the target and we will have to correct the package install
+ if ${@bb.utils.contains('PACKAGECONFIG', 'python', 'true', 'false', d)}; then
+ if [ -d ${D}${exec_prefix}/lib/python* ]; then mv ${D}${exec_prefix}/lib/python* ${D}${libdir}/; fi
+ rmdir --ignore-fail-on-non-empty ${D}${exec_prefix}/lib
+ fi
+}
+
+EXTRA_OEMAKE += "TEST_DIR=${D}${PTEST_PATH}/src/tests"
+
+do_install_ptest() {
+ oe_runmake -C src/tests install-ptest
+}
+
+pkg_postinst_${PN}() {
+ if [ -z "$D" ] && [ -e /etc/init.d/populate-volatile.sh ] ; then
+ /etc/init.d/populate-volatile.sh update
+ fi
+}
+
+pkg_postinst_${PN}-networking() {
+ if [ "x$D" != "x" ]; then
+ exit 1
+ fi
+
+ # setup for our bridge
+ echo "lxc.network.link=lxcbr0" >> ${sysconfdir}/lxc/default.conf
+
+cat >> /etc/network/interfaces << EOF
+
+auto lxcbr0
+iface lxcbr0 inet dhcp
+ bridge_ports eth0
+ bridge_fd 0
+ bridge_maxwait 0
+EOF
+
+cat<<EOF>/etc/network/if-pre-up.d/lxcbr0
+#! /bin/sh
+
+if test "x\$IFACE" = xlxcbr0 ; then
+ brctl show |grep lxcbr0 > /dev/null 2>/dev/null
+ if [ \$? != 0 ] ; then
+ brctl addbr lxcbr0
+ brctl addif lxcbr0 eth0
+ ip addr flush eth0
+ ifconfig eth0 up
+ fi
+fi
+EOF
+chmod 755 /etc/network/if-pre-up.d/lxcbr0
+}
diff --git a/import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend b/import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend
new file mode 100644
index 0000000..eb973ad
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/base-files/base-files_3.%.bbappend
@@ -0,0 +1,5 @@
+do_install_append() {
+ if echo "${DISTRO_FEATURES}" | grep -q 'xen'; then
+ echo "xenfs /proc/xen xenfs defaults 0 0" >> ${D}${sysconfdir}/fstab
+ fi
+}
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox/getopt.cfg b/import-layers/meta-virtualization/recipes-core/busybox/busybox/getopt.cfg
new file mode 100644
index 0000000..8dcd350
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox/getopt.cfg
@@ -0,0 +1,2 @@
+CONFIG_GETOPT=y
+CONFIG_FEATURE_GETOPT_LONG=y
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox/lspci.cfg b/import-layers/meta-virtualization/recipes-core/busybox/busybox/lspci.cfg
new file mode 100644
index 0000000..6458af8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox/lspci.cfg
@@ -0,0 +1 @@
+CONFIG_LSPCI=y
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox/lsusb.cfg b/import-layers/meta-virtualization/recipes-core/busybox/busybox/lsusb.cfg
new file mode 100644
index 0000000..2aba6ef
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox/lsusb.cfg
@@ -0,0 +1 @@
+CONFIG_LSUSB=y
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox/mdev.cfg b/import-layers/meta-virtualization/recipes-core/busybox/busybox/mdev.cfg
new file mode 100644
index 0000000..f8d6da8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox/mdev.cfg
@@ -0,0 +1,6 @@
+CONFIG_MDEV=y
+CONFIG_FEATURE_MDEV_CONF=y
+CONFIG_FEATURE_MDEV_RENAME=y
+CONFIG_FEATURE_MDEV_RENAME_REGEXP=y
+CONFIG_FEATURE_MDEV_EXEC=y
+CONFIG_FEATURE_MDEV_LOAD_FIRMWARE=y
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox/mount-cifs.cfg b/import-layers/meta-virtualization/recipes-core/busybox/busybox/mount-cifs.cfg
new file mode 100644
index 0000000..88f0404
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox/mount-cifs.cfg
@@ -0,0 +1 @@
+CONFIG_FEATURE_MOUNT_CIFS=y
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox/ps-extras.cfg b/import-layers/meta-virtualization/recipes-core/busybox/busybox/ps-extras.cfg
new file mode 100644
index 0000000..7434635
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox/ps-extras.cfg
@@ -0,0 +1,3 @@
+CONFIG_DESKTOP=y
+CONFIG_FEATURE_PS_TIME=y
+CONFIG_FEATURE_PS_ADDITIONAL_COLUMNS=y
diff --git a/import-layers/meta-virtualization/recipes-core/busybox/busybox_%.bbappend b/import-layers/meta-virtualization/recipes-core/busybox/busybox_%.bbappend
new file mode 100644
index 0000000..8369a03
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/busybox/busybox_%.bbappend
@@ -0,0 +1,10 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI += " \
+ file://lspci.cfg \
+ file://lsusb.cfg \
+ file://mdev.cfg \
+ file://mount-cifs.cfg \
+ file://ps-extras.cfg \
+ file://getopt.cfg \
+ "
diff --git a/import-layers/meta-virtualization/recipes-core/initscripts/initscripts_1.%.bbappend b/import-layers/meta-virtualization/recipes-core/initscripts/initscripts_1.%.bbappend
new file mode 100644
index 0000000..0600ab2
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/initscripts/initscripts_1.%.bbappend
@@ -0,0 +1,16 @@
+do_install_append() {
+
+ echo >> ${D}${sysconfdir}/init.d/functions
+ echo success \(\) \{ >> ${D}${sysconfdir}/init.d/functions
+ echo \ \ \ \ echo \$* >> ${D}${sysconfdir}/init.d/functions
+ echo \} >> ${D}${sysconfdir}/init.d/functions
+ echo failure \(\) \{ >> ${D}${sysconfdir}/init.d/functions
+ echo \ \ \ \ echo \$* >> ${D}${sysconfdir}/init.d/functions
+ echo \} >> ${D}${sysconfdir}/init.d/functions
+ echo warning \(\) \{ >> ${D}${sysconfdir}/init.d/functions
+ echo \ \ \ \ echo \$* >> ${D}${sysconfdir}/init.d/functions
+ echo \} >> ${D}${sysconfdir}/init.d/functions
+ echo begin \(\) \{ >> ${D}${sysconfdir}/init.d/functions
+ echo \ \ \ \ echo \$* >> ${D}${sysconfdir}/init.d/functions
+ echo \} >> ${D}${sysconfdir}/init.d/functions
+}
diff --git a/import-layers/meta-virtualization/recipes-core/sysvinit/sysvinit-inittab_2.%.bbappend b/import-layers/meta-virtualization/recipes-core/sysvinit/sysvinit-inittab_2.%.bbappend
new file mode 100644
index 0000000..1b89aec
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-core/sysvinit/sysvinit-inittab_2.%.bbappend
@@ -0,0 +1,6 @@
+do_install_append() {
+ if echo "${DISTRO_FEATURES}" | grep -q 'xen'; then
+ echo "" >> ${D}${sysconfdir}/inittab
+ echo "X0:12345:respawn:/sbin/getty 115200 hvc0" >> ${D}${sysconfdir}/inittab
+ fi
+}
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4.inc
new file mode 100644
index 0000000..a65459f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4.inc
@@ -0,0 +1,15 @@
+require go-common.inc
+
+PV = "1.4.3"
+GO_BASEVERSION = "1.4"
+FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:"
+
+SRC_URI += "\
+ file://016-armhf-elf-header.patch \
+ file://go-cross-backport-cmd-link-support-new-386-amd64-rel.patch \
+ file://syslog.patch \
+"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=591778525c869cdde0ab5a1bf283cd81"
+SRC_URI[md5sum] = "dfb604511115dd402a77a553a5923a04"
+SRC_URI[sha256sum] = "9947fc705b0b841b5938c48b22dc33e9647ec0752bae66e50278df4f23f64959"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/016-armhf-elf-header.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/016-armhf-elf-header.patch
new file mode 100644
index 0000000..1ae53a3
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/016-armhf-elf-header.patch
@@ -0,0 +1,21 @@
+Description: Use correct ELF header for armhf binaries.
+Author: Adam Conrad <adconrad@ubuntu.com>
+Last-Update: 2013-07-08
+
+Index: go/src/cmd/ld/elf.c
+===================================================================
+--- go.orig/src/cmd/ld/elf.c 2015-02-20 10:49:58.763451586 -0800
++++ go/src/cmd/ld/elf.c 2015-02-20 10:49:27.895478521 -0800
+@@ -57,7 +57,11 @@
+ case '5':
+ // we use EABI on both linux/arm and freebsd/arm.
+ if(HEADTYPE == Hlinux || HEADTYPE == Hfreebsd)
+- hdr.flags = 0x5000002; // has entry point, Version5 EABI
++#ifdef __ARM_PCS_VFP
++ hdr.flags = 0x5000402; // has entry point, Version5 EABI, hard-float ABI
++#else
++ hdr.flags = 0x5000202; // has entry point, Version5 EABI, soft-float ABI
++#endif
+ // fallthrough
+ default:
+ hdr.phoff = ELF32HDRSIZE; /* Must be be ELF32HDRSIZE: first PHdr must follow ELF header */
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/go-cross-backport-cmd-link-support-new-386-amd64-rel.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/go-cross-backport-cmd-link-support-new-386-amd64-rel.patch
new file mode 100644
index 0000000..de3f49c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/go-cross-backport-cmd-link-support-new-386-amd64-rel.patch
@@ -0,0 +1,223 @@
+From d6eefad445831c161fca130f9bdf7b3848aac23c Mon Sep 17 00:00:00 2001
+From: Paul Gortmaker <paul.gortmaker@windriver.com>
+Date: Tue, 29 Mar 2016 21:14:33 -0400
+Subject: [PATCH] go-cross: backport "cmd/link: support new 386/amd64
+ relocations"
+
+Newer binutils won't support building older go-1.4.3 as per:
+
+https://github.com/golang/go/issues/13114
+
+Upstream commit 914db9f060b1fd3eb1f74d48f3bd46a73d4ae9c7 (see subj)
+was identified as the fix and nominated for 1.4.4 but that release
+never happened. The paths in 1.4.3 aren't the same as go1.6beta1~662
+where this commit appeared, but the NetBSD folks indicated what a
+1.4.3 backport would look like here: https://gnats.netbsd.org/50777
+
+This is based on that, but without the BSD wrapper infrastructure
+layer that makes things look like patches of patches.
+
+Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
+
+diff --git a/src/cmd/6l/asm.c b/src/cmd/6l/asm.c
+index 18b5aa311981..2e9d339aef87 100644
+--- a/src/cmd/6l/asm.c
++++ b/src/cmd/6l/asm.c
+@@ -118,6 +118,8 @@ adddynrel(LSym *s, Reloc *r)
+ return;
+
+ case 256 + R_X86_64_GOTPCREL:
++ case 256 + R_X86_64_GOTPCRELX:
++ case 256 + R_X86_64_REX_GOTPCRELX:
+ if(targ->type != SDYNIMPORT) {
+ // have symbol
+ if(r->off >= 2 && s->p[r->off-2] == 0x8b) {
+diff --git a/src/cmd/8l/asm.c b/src/cmd/8l/asm.c
+index 98c04240374f..cff29488e8af 100644
+--- a/src/cmd/8l/asm.c
++++ b/src/cmd/8l/asm.c
+@@ -115,6 +115,7 @@ adddynrel(LSym *s, Reloc *r)
+ return;
+
+ case 256 + R_386_GOT32:
++ case 256 + R_386_GOT32X:
+ if(targ->type != SDYNIMPORT) {
+ // have symbol
+ if(r->off >= 2 && s->p[r->off-2] == 0x8b) {
+diff --git a/src/cmd/ld/elf.h b/src/cmd/ld/elf.h
+index e84d996f2596..bbf2cfaa3cc0 100644
+--- a/src/cmd/ld/elf.h
++++ b/src/cmd/ld/elf.h
+@@ -478,32 +478,47 @@ typedef struct {
+ * Relocation types.
+ */
+
+-#define R_X86_64_NONE 0 /* No relocation. */
+-#define R_X86_64_64 1 /* Add 64 bit symbol value. */
+-#define R_X86_64_PC32 2 /* PC-relative 32 bit signed sym value. */
+-#define R_X86_64_GOT32 3 /* PC-relative 32 bit GOT offset. */
+-#define R_X86_64_PLT32 4 /* PC-relative 32 bit PLT offset. */
+-#define R_X86_64_COPY 5 /* Copy data from shared object. */
+-#define R_X86_64_GLOB_DAT 6 /* Set GOT entry to data address. */
+-#define R_X86_64_JMP_SLOT 7 /* Set GOT entry to code address. */
+-#define R_X86_64_RELATIVE 8 /* Add load address of shared object. */
+-#define R_X86_64_GOTPCREL 9 /* Add 32 bit signed pcrel offset to GOT. */
+-#define R_X86_64_32 10 /* Add 32 bit zero extended symbol value */
+-#define R_X86_64_32S 11 /* Add 32 bit sign extended symbol value */
+-#define R_X86_64_16 12 /* Add 16 bit zero extended symbol value */
+-#define R_X86_64_PC16 13 /* Add 16 bit signed extended pc relative symbol value */
+-#define R_X86_64_8 14 /* Add 8 bit zero extended symbol value */
+-#define R_X86_64_PC8 15 /* Add 8 bit signed extended pc relative symbol value */
+-#define R_X86_64_DTPMOD64 16 /* ID of module containing symbol */
+-#define R_X86_64_DTPOFF64 17 /* Offset in TLS block */
+-#define R_X86_64_TPOFF64 18 /* Offset in static TLS block */
+-#define R_X86_64_TLSGD 19 /* PC relative offset to GD GOT entry */
+-#define R_X86_64_TLSLD 20 /* PC relative offset to LD GOT entry */
+-#define R_X86_64_DTPOFF32 21 /* Offset in TLS block */
+-#define R_X86_64_GOTTPOFF 22 /* PC relative offset to IE GOT entry */
+-#define R_X86_64_TPOFF32 23 /* Offset in static TLS block */
+-
+-#define R_X86_64_COUNT 24 /* Count of defined relocation types. */
++#define R_X86_64_NONE 0
++#define R_X86_64_64 1
++#define R_X86_64_PC32 2
++#define R_X86_64_GOT32 3
++#define R_X86_64_PLT32 4
++#define R_X86_64_COPY 5
++#define R_X86_64_GLOB_DAT 6
++#define R_X86_64_JMP_SLOT 7
++#define R_X86_64_RELATIVE 8
++#define R_X86_64_GOTPCREL 9
++#define R_X86_64_32 10
++#define R_X86_64_32S 11
++#define R_X86_64_16 12
++#define R_X86_64_PC16 13
++#define R_X86_64_8 14
++#define R_X86_64_PC8 15
++#define R_X86_64_DTPMOD64 16
++#define R_X86_64_DTPOFF64 17
++#define R_X86_64_TPOFF64 18
++#define R_X86_64_TLSGD 19
++#define R_X86_64_TLSLD 20
++#define R_X86_64_DTPOFF32 21
++#define R_X86_64_GOTTPOFF 22
++#define R_X86_64_TPOFF32 23
++#define R_X86_64_PC64 24
++#define R_X86_64_GOTOFF64 25
++#define R_X86_64_GOTPC32 26
++#define R_X86_64_GOT64 27
++#define R_X86_64_GOTPCREL64 28
++#define R_X86_64_GOTPC64 29
++#define R_X86_64_GOTPLT64 30
++#define R_X86_64_PLTOFF64 31
++#define R_X86_64_SIZE32 32
++#define R_X86_64_SIZE64 33
++#define R_X86_64_GOTPC32_TLSDESC 34
++#define R_X86_64_TLSDESC_CALL 35
++#define R_X86_64_TLSDESC 36
++#define R_X86_64_IRELATIVE 37
++#define R_X86_64_PC32_BND 40
++#define R_X86_64_GOTPCRELX 41
++#define R_X86_64_REX_GOTPCRELX 42
+
+
+ #define R_ALPHA_NONE 0 /* No reloc */
+@@ -581,39 +596,42 @@ typedef struct {
+ #define R_ARM_COUNT 38 /* Count of defined relocation types. */
+
+
+-#define R_386_NONE 0 /* No relocation. */
+-#define R_386_32 1 /* Add symbol value. */
+-#define R_386_PC32 2 /* Add PC-relative symbol value. */
+-#define R_386_GOT32 3 /* Add PC-relative GOT offset. */
+-#define R_386_PLT32 4 /* Add PC-relative PLT offset. */
+-#define R_386_COPY 5 /* Copy data from shared object. */
+-#define R_386_GLOB_DAT 6 /* Set GOT entry to data address. */
+-#define R_386_JMP_SLOT 7 /* Set GOT entry to code address. */
+-#define R_386_RELATIVE 8 /* Add load address of shared object. */
+-#define R_386_GOTOFF 9 /* Add GOT-relative symbol address. */
+-#define R_386_GOTPC 10 /* Add PC-relative GOT table address. */
+-#define R_386_TLS_TPOFF 14 /* Negative offset in static TLS block */
+-#define R_386_TLS_IE 15 /* Absolute address of GOT for -ve static TLS */
+-#define R_386_TLS_GOTIE 16 /* GOT entry for negative static TLS block */
+-#define R_386_TLS_LE 17 /* Negative offset relative to static TLS */
+-#define R_386_TLS_GD 18 /* 32 bit offset to GOT (index,off) pair */
+-#define R_386_TLS_LDM 19 /* 32 bit offset to GOT (index,zero) pair */
+-#define R_386_TLS_GD_32 24 /* 32 bit offset to GOT (index,off) pair */
+-#define R_386_TLS_GD_PUSH 25 /* pushl instruction for Sun ABI GD sequence */
+-#define R_386_TLS_GD_CALL 26 /* call instruction for Sun ABI GD sequence */
+-#define R_386_TLS_GD_POP 27 /* popl instruction for Sun ABI GD sequence */
+-#define R_386_TLS_LDM_32 28 /* 32 bit offset to GOT (index,zero) pair */
+-#define R_386_TLS_LDM_PUSH 29 /* pushl instruction for Sun ABI LD sequence */
+-#define R_386_TLS_LDM_CALL 30 /* call instruction for Sun ABI LD sequence */
+-#define R_386_TLS_LDM_POP 31 /* popl instruction for Sun ABI LD sequence */
+-#define R_386_TLS_LDO_32 32 /* 32 bit offset from start of TLS block */
+-#define R_386_TLS_IE_32 33 /* 32 bit offset to GOT static TLS offset entry */
+-#define R_386_TLS_LE_32 34 /* 32 bit offset within static TLS block */
+-#define R_386_TLS_DTPMOD32 35 /* GOT entry containing TLS index */
+-#define R_386_TLS_DTPOFF32 36 /* GOT entry containing TLS offset */
+-#define R_386_TLS_TPOFF32 37 /* GOT entry of -ve static TLS offset */
+-
+-#define R_386_COUNT 38 /* Count of defined relocation types. */
++#define R_386_NONE 0
++#define R_386_32 1
++#define R_386_PC32 2
++#define R_386_GOT32 3
++#define R_386_PLT32 4
++#define R_386_COPY 5
++#define R_386_GLOB_DAT 6
++#define R_386_JMP_SLOT 7
++#define R_386_RELATIVE 8
++#define R_386_GOTOFF 9
++#define R_386_GOTPC 10
++#define R_386_TLS_TPOFF 14
++#define R_386_TLS_IE 15
++#define R_386_TLS_GOTIE 16
++#define R_386_TLS_LE 17
++#define R_386_TLS_GD 18
++#define R_386_TLS_LDM 19
++#define R_386_TLS_GD_32 24
++#define R_386_TLS_GD_PUSH 25
++#define R_386_TLS_GD_CALL 26
++#define R_386_TLS_GD_POP 27
++#define R_386_TLS_LDM_32 28
++#define R_386_TLS_LDM_PUSH 29
++#define R_386_TLS_LDM_CALL 30
++#define R_386_TLS_LDM_POP 31
++#define R_386_TLS_LDO_32 32
++#define R_386_TLS_IE_32 33
++#define R_386_TLS_LE_32 34
++#define R_386_TLS_DTPMOD32 35
++#define R_386_TLS_DTPOFF32 36
++#define R_386_TLS_TPOFF32 37
++#define R_386_TLS_GOTDESC 39
++#define R_386_TLS_DESC_CALL 40
++#define R_386_TLS_DESC 41
++#define R_386_IRELATIVE 42
++#define R_386_GOT32X 43
+
+ #define R_PPC_NONE 0 /* No relocation. */
+ #define R_PPC_ADDR32 1
+diff --git a/src/cmd/ld/ldelf.c b/src/cmd/ld/ldelf.c
+index dd5fa0d2a839..2e2fbd17377f 100644
+--- a/src/cmd/ld/ldelf.c
++++ b/src/cmd/ld/ldelf.c
+@@ -888,12 +888,15 @@ reltype(char *pn, int elftype, uchar *siz)
+ case R('6', R_X86_64_PC32):
+ case R('6', R_X86_64_PLT32):
+ case R('6', R_X86_64_GOTPCREL):
++ case R('6', R_X86_64_GOTPCRELX):
++ case R('6', R_X86_64_REX_GOTPCRELX):
+ case R('8', R_386_32):
+ case R('8', R_386_PC32):
+ case R('8', R_386_GOT32):
+ case R('8', R_386_PLT32):
+ case R('8', R_386_GOTOFF):
+ case R('8', R_386_GOTPC):
++ case R('8', R_386_GOT32X):
+ *siz = 4;
+ break;
+ case R('6', R_X86_64_64):
+--
+2.7.2
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/syslog.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/syslog.patch
new file mode 100644
index 0000000..ce82a4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.4/syslog.patch
@@ -0,0 +1,57 @@
+diff -r -u go/src/log/syslog/syslog.go /home/achang/GOCOPY/go/src/log/syslog/syslog.go
+--- go/src/log/syslog/syslog.go 2013-11-28 13:38:28.000000000 -0800
++++ /home/achang/GOCOPY/go/src/log/syslog/syslog.go 2014-10-03 11:44:37.710403200 -0700
+@@ -33,6 +33,9 @@
+ const severityMask = 0x07
+ const facilityMask = 0xf8
+
++var writeTimeout = 1 * time.Second
++var connectTimeout = 1 * time.Second
++
+ const (
+ // Severity.
+
+@@ -100,6 +103,7 @@
+ type serverConn interface {
+ writeString(p Priority, hostname, tag, s, nl string) error
+ close() error
++ setWriteDeadline(t time.Time) error
+ }
+
+ type netConn struct {
+@@ -273,7 +277,11 @@
+ nl = "\n"
+ }
+
+- err := w.conn.writeString(p, w.hostname, w.tag, msg, nl)
++ err := w.conn.setWriteDeadline(time.Now().Add(writeTimeout))
++ if err != nil {
++ return 0, err
++ }
++ err = w.conn.writeString(p, w.hostname, w.tag, msg, nl)
+ if err != nil {
+ return 0, err
+ }
+@@ -305,6 +313,10 @@
+ return n.conn.Close()
+ }
+
++func (n *netConn) setWriteDeadline(t time.Time) error {
++ return n.conn.SetWriteDeadline(t)
++}
++
+ // NewLogger creates a log.Logger whose output is written to
+ // the system log service with the specified priority. The logFlag
+ // argument is the flag set passed through to log.New to create
+diff -r -u go/src/log/syslog/syslog_unix.go /home/achang/GOCOPY/go/src/log/syslog/syslog_unix.go
+--- go/src/log/syslog/syslog_unix.go 2013-11-28 13:38:28.000000000 -0800
++++ /home/achang/GOCOPY/go/src/log/syslog/syslog_unix.go 2014-10-03 11:44:39.010403175 -0700
+@@ -19,7 +19,7 @@
+ logPaths := []string{"/dev/log", "/var/run/syslog"}
+ for _, network := range logTypes {
+ for _, path := range logPaths {
+- conn, err := net.Dial(network, path)
++ conn, err := net.DialTimeout(network, path, connectTimeout)
+ if err != nil {
+ continue
+ } else {
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5.inc
new file mode 100644
index 0000000..bb91ed8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5.inc
@@ -0,0 +1,19 @@
+require go-common.inc
+
+PV = "1.5.2"
+GO_BASEVERSION = "1.5"
+FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:"
+
+
+SRC_URI += "\
+ file://armhf-elf-header.patch \
+ file://syslog.patch \
+ file://fix-target-cc-for-build.patch \
+ file://fix-cc-handling.patch \
+ file://split-host-and-target-build.patch \
+ file://gotooldir.patch \
+"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=591778525c869cdde0ab5a1bf283cd81"
+SRC_URI[md5sum] = "38fed22e7b80672291e7cba7fb9c3475"
+SRC_URI[sha256sum] = "f3ddd624c00461641ce3d3a8d8e3c622392384ca7699e901b370a4eac5987a74"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/armhf-elf-header.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/armhf-elf-header.patch
new file mode 100644
index 0000000..f56869b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/armhf-elf-header.patch
@@ -0,0 +1,19 @@
+Index: go/src/cmd/link/internal/ld/elf.go
+===================================================================
+--- go.orig/src/cmd/link/internal/ld/elf.go 2015-07-29 13:05:25.952533140 -0700
++++ go/src/cmd/link/internal/ld/elf.go 2015-07-29 13:14:53.413112995 -0700
+@@ -780,7 +780,13 @@
+ // 32-bit architectures
+ case '5':
+ // we use EABI on both linux/arm and freebsd/arm.
+- if HEADTYPE == obj.Hlinux || HEADTYPE == obj.Hfreebsd {
++ if HEADTYPE == obj.Hlinux {
++ if Ctxt.Goarm == 7 {
++ ehdr.flags = 0x5000402 // has entry point, Version5 EABI, hard float
++ } else {
++ ehdr.flags = 0x5000202 // has entry point, Version5 EABI, soft float
++ }
++ } else if HEADTYPE == obj.Hfreebsd {
+ ehdr.flags = 0x5000002 // has entry point, Version5 EABI
+ }
+ fallthrough
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/fix-cc-handling.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/fix-cc-handling.patch
new file mode 100644
index 0000000..85770a9
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/fix-cc-handling.patch
@@ -0,0 +1,46 @@
+Index: go/src/cmd/go/build.go
+===================================================================
+--- go.orig/src/cmd/go/build.go 2015-07-29 14:48:40.323185807 -0700
++++ go/src/cmd/go/build.go 2015-07-30 07:37:40.529818586 -0700
+@@ -2805,12 +2805,24 @@
+ return b.ccompilerCmd("CC", defaultCC, objdir)
+ }
+
++// gccCmd returns a gcc command line prefix
++// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
++func (b *builder) gccCmdForReal() []string {
++ return envList("CC", defaultCC)
++}
++
+ // gxxCmd returns a g++ command line prefix
+ // defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+ func (b *builder) gxxCmd(objdir string) []string {
+ return b.ccompilerCmd("CXX", defaultCXX, objdir)
+ }
+
++// gxxCmd returns a g++ command line prefix
++// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
++func (b *builder) gxxCmdForReal() []string {
++ return envList("CXX", defaultCXX)
++}
++
+ // ccompilerCmd returns a command line prefix for the given environment
+ // variable and using the default command when the variable is empty.
+ func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
+Index: go/src/cmd/go/env.go
+===================================================================
+--- go.orig/src/cmd/go/env.go 2015-07-29 14:48:40.323185807 -0700
++++ go/src/cmd/go/env.go 2015-07-30 07:40:54.461655721 -0700
+@@ -52,10 +52,9 @@
+
+ if goos != "plan9" {
+ cmd := b.gccCmd(".")
+- env = append(env, envVar{"CC", cmd[0]})
++ env = append(env, envVar{"CC", strings.Join(b.gccCmdForReal(), " ")})
+ env = append(env, envVar{"GOGCCFLAGS", strings.Join(cmd[3:], " ")})
+- cmd = b.gxxCmd(".")
+- env = append(env, envVar{"CXX", cmd[0]})
++ env = append(env, envVar{"CXX", strings.Join(b.gxxCmdForReal(), " ")})
+ }
+
+ if buildContext.CgoEnabled {
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/fix-target-cc-for-build.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/fix-target-cc-for-build.patch
new file mode 100644
index 0000000..adfeb6b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/fix-target-cc-for-build.patch
@@ -0,0 +1,13 @@
+Index: go/src/make.bash
+===================================================================
+--- go.orig/src/make.bash 2015-07-29 13:28:11.334031696 -0700
++++ go/src/make.bash 2015-07-29 13:36:55.814465630 -0700
+@@ -158,7 +158,7 @@
+ fi
+
+ echo "##### Building packages and commands for $GOOS/$GOARCH."
+-CC=$CC_FOR_TARGET "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v std cmd
++CC="$CC_FOR_TARGET" "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v std cmd
+ echo
+
+ rm -f "$GOTOOLDIR"/go_bootstrap
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/gotooldir.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/gotooldir.patch
new file mode 100644
index 0000000..473a328
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/gotooldir.patch
@@ -0,0 +1,26 @@
+Index: go/src/go/build/build.go
+===================================================================
+--- go.orig/src/go/build/build.go
++++ go/src/go/build/build.go
+@@ -1388,7 +1388,7 @@ func init() {
+ }
+
+ // ToolDir is the directory containing build tools.
+-var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
++var ToolDir = envOr("GOTOOLDIR", filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH))
+
+ // IsLocalImport reports whether the import path is
+ // a local import path, like ".", "..", "./foo", or "../foo".
+Index: go/src/cmd/go/build.go
+===================================================================
+--- go.orig/src/cmd/go/build.go
++++ go/src/cmd/go/build.go
+@@ -1312,7 +1312,7 @@ func (b *builder) build(a *action) (err
+ }
+
+ cgoExe := tool("cgo")
+- if a.cgo != nil && a.cgo.target != "" {
++ if a.cgo != nil && a.cgo.target != "" && os.Getenv("GOTOOLDIR") == "" {
+ cgoExe = a.cgo.target
+ }
+ outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles)
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/split-host-and-target-build.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/split-host-and-target-build.patch
new file mode 100644
index 0000000..85fb240
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/split-host-and-target-build.patch
@@ -0,0 +1,58 @@
+Index: go/src/make.bash
+===================================================================
+--- go.orig/src/make.bash
++++ go/src/make.bash
+@@ -143,12 +143,23 @@ if [ "$1" = "--no-clean" ]; then
+ buildall=""
+ shift
+ fi
+-./cmd/dist/dist bootstrap $buildall $GO_DISTFLAGS -v # builds go_bootstrap
+-# Delay move of dist tool to now, because bootstrap may clear tool directory.
+-mv cmd/dist/dist "$GOTOOLDIR"/dist
+-echo
+
+-if [ "$GOHOSTARCH" != "$GOARCH" -o "$GOHOSTOS" != "$GOOS" ]; then
++do_host_build="yes"
++do_target_build="yes"
++if [ "$1" = "--target-only" ]; then
++ do_host_build="no"
++ shift
++elif [ "$1" = "--host-only" ]; then
++ do_target_build="no"
++ shift
++fi
++
++if [ "$do_host_build" = "yes" ]; then
++ ./cmd/dist/dist bootstrap $buildall $GO_DISTFLAGS -v # builds go_bootstrap
++ # Delay move of dist tool to now, because bootstrap may clear tool directory.
++ mv cmd/dist/dist "$GOTOOLDIR"/dist
++ echo
++
+ echo "##### Building packages and commands for host, $GOHOSTOS/$GOHOSTARCH."
+ # CC_FOR_TARGET is recorded as the default compiler for the go tool. When building for the host, however,
+ # use the host compiler, CC, from `cmd/dist/dist env` instead.
+@@ -157,11 +168,20 @@ if [ "$GOHOSTARCH" != "$GOARCH" -o "$GOH
+ echo
+ fi
+
+-echo "##### Building packages and commands for $GOOS/$GOARCH."
+-CC="$CC_FOR_TARGET" "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v std cmd
+-echo
++if [ "$do_target_build" = "yes" ]; then
++ GO_INSTALL="${GO_TARGET_INSTALL:-std cmd}"
++ echo "##### Building packages and commands for $GOOS/$GOARCH."
++ if [ "$GOHOSTOS" = "$GOOS" -a "$GOHOSTARCH" = "$GOARCH" -a "$do_host_build" = "yes" ]; then
++ rm -rf ./host-tools
++ mkdir ./host-tools
++ mv "$GOTOOLDIR"/* ./host-tools
++ GOTOOLDIR="$PWD/host-tools"
++ fi
++ GOTOOLDIR="$GOTOOLDIR" CC="$CC_FOR_TARGET" "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v ${GO_INSTALL}
++ echo
+
+-rm -f "$GOTOOLDIR"/go_bootstrap
++ rm -f "$GOTOOLDIR"/go_bootstrap
++fi
+
+ if [ "$1" != "--no-banner" ]; then
+ "$GOTOOLDIR"/dist banner
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/syslog.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/syslog.patch
new file mode 100644
index 0000000..ce82a4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.5/syslog.patch
@@ -0,0 +1,57 @@
+diff -r -u go/src/log/syslog/syslog.go /home/achang/GOCOPY/go/src/log/syslog/syslog.go
+--- go/src/log/syslog/syslog.go 2013-11-28 13:38:28.000000000 -0800
++++ /home/achang/GOCOPY/go/src/log/syslog/syslog.go 2014-10-03 11:44:37.710403200 -0700
+@@ -33,6 +33,9 @@
+ const severityMask = 0x07
+ const facilityMask = 0xf8
+
++var writeTimeout = 1 * time.Second
++var connectTimeout = 1 * time.Second
++
+ const (
+ // Severity.
+
+@@ -100,6 +103,7 @@
+ type serverConn interface {
+ writeString(p Priority, hostname, tag, s, nl string) error
+ close() error
++ setWriteDeadline(t time.Time) error
+ }
+
+ type netConn struct {
+@@ -273,7 +277,11 @@
+ nl = "\n"
+ }
+
+- err := w.conn.writeString(p, w.hostname, w.tag, msg, nl)
++ err := w.conn.setWriteDeadline(time.Now().Add(writeTimeout))
++ if err != nil {
++ return 0, err
++ }
++ err = w.conn.writeString(p, w.hostname, w.tag, msg, nl)
+ if err != nil {
+ return 0, err
+ }
+@@ -305,6 +313,10 @@
+ return n.conn.Close()
+ }
+
++func (n *netConn) setWriteDeadline(t time.Time) error {
++ return n.conn.SetWriteDeadline(t)
++}
++
+ // NewLogger creates a log.Logger whose output is written to
+ // the system log service with the specified priority. The logFlag
+ // argument is the flag set passed through to log.New to create
+diff -r -u go/src/log/syslog/syslog_unix.go /home/achang/GOCOPY/go/src/log/syslog/syslog_unix.go
+--- go/src/log/syslog/syslog_unix.go 2013-11-28 13:38:28.000000000 -0800
++++ /home/achang/GOCOPY/go/src/log/syslog/syslog_unix.go 2014-10-03 11:44:39.010403175 -0700
+@@ -19,7 +19,7 @@
+ logPaths := []string{"/dev/log", "/var/run/syslog"}
+ for _, network := range logTypes {
+ for _, path := range logPaths {
+- conn, err := net.Dial(network, path)
++ conn, err := net.DialTimeout(network, path, connectTimeout)
+ if err != nil {
+ continue
+ } else {
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6.inc
new file mode 100644
index 0000000..7a57eaf
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6.inc
@@ -0,0 +1,19 @@
+require go-common.inc
+
+PV = "1.6.2"
+GO_BASEVERSION = "1.6"
+FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:"
+
+
+SRC_URI += "\
+ file://armhf-elf-header.patch \
+ file://syslog.patch \
+ file://fix-target-cc-for-build.patch \
+ file://fix-cc-handling.patch \
+ file://split-host-and-target-build.patch \
+ file://gotooldir.patch \
+"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=591778525c869cdde0ab5a1bf283cd81"
+SRC_URI[md5sum] = "d1b50fa98d9a71eeee829051411e6207"
+SRC_URI[sha256sum] = "787b0b750d037016a30c6ed05a8a70a91b2e9db4bd9b1a2453aa502a63f1bccc"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/armhf-elf-header.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/armhf-elf-header.patch
new file mode 100644
index 0000000..6113138
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/armhf-elf-header.patch
@@ -0,0 +1,19 @@
+Index: go/src/cmd/link/internal/ld/elf.go
+===================================================================
+--- go.orig/src/cmd/link/internal/ld/elf.go
++++ go/src/cmd/link/internal/ld/elf.go
+@@ -827,7 +827,13 @@
+ // 32-bit architectures
+ case '5':
+ // we use EABI on both linux/arm and freebsd/arm.
+- if HEADTYPE == obj.Hlinux || HEADTYPE == obj.Hfreebsd {
++ if HEADTYPE == obj.Hlinux {
++ if Ctxt.Goarm == 7 {
++ ehdr.flags = 0x5000402 // has entry point, Version5 EABI, hard float
++ } else {
++ ehdr.flags = 0x5000202 // has entry point, Version5 EABI, soft float
++ }
++ } else if HEADTYPE == obj.Hfreebsd {
+ // We set a value here that makes no indication of which
+ // float ABI the object uses, because this is information
+ // used by the dynamic linker to compare executables and
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/fix-cc-handling.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/fix-cc-handling.patch
new file mode 100644
index 0000000..85770a9
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/fix-cc-handling.patch
@@ -0,0 +1,46 @@
+Index: go/src/cmd/go/build.go
+===================================================================
+--- go.orig/src/cmd/go/build.go 2015-07-29 14:48:40.323185807 -0700
++++ go/src/cmd/go/build.go 2015-07-30 07:37:40.529818586 -0700
+@@ -2805,12 +2805,24 @@
+ return b.ccompilerCmd("CC", defaultCC, objdir)
+ }
+
++// gccCmd returns a gcc command line prefix
++// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
++func (b *builder) gccCmdForReal() []string {
++ return envList("CC", defaultCC)
++}
++
+ // gxxCmd returns a g++ command line prefix
+ // defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+ func (b *builder) gxxCmd(objdir string) []string {
+ return b.ccompilerCmd("CXX", defaultCXX, objdir)
+ }
+
++// gxxCmd returns a g++ command line prefix
++// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
++func (b *builder) gxxCmdForReal() []string {
++ return envList("CXX", defaultCXX)
++}
++
+ // ccompilerCmd returns a command line prefix for the given environment
+ // variable and using the default command when the variable is empty.
+ func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
+Index: go/src/cmd/go/env.go
+===================================================================
+--- go.orig/src/cmd/go/env.go 2015-07-29 14:48:40.323185807 -0700
++++ go/src/cmd/go/env.go 2015-07-30 07:40:54.461655721 -0700
+@@ -52,10 +52,9 @@
+
+ if goos != "plan9" {
+ cmd := b.gccCmd(".")
+- env = append(env, envVar{"CC", cmd[0]})
++ env = append(env, envVar{"CC", strings.Join(b.gccCmdForReal(), " ")})
+ env = append(env, envVar{"GOGCCFLAGS", strings.Join(cmd[3:], " ")})
+- cmd = b.gxxCmd(".")
+- env = append(env, envVar{"CXX", cmd[0]})
++ env = append(env, envVar{"CXX", strings.Join(b.gxxCmdForReal(), " ")})
+ }
+
+ if buildContext.CgoEnabled {
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/fix-target-cc-for-build.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/fix-target-cc-for-build.patch
new file mode 100644
index 0000000..adfeb6b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/fix-target-cc-for-build.patch
@@ -0,0 +1,13 @@
+Index: go/src/make.bash
+===================================================================
+--- go.orig/src/make.bash 2015-07-29 13:28:11.334031696 -0700
++++ go/src/make.bash 2015-07-29 13:36:55.814465630 -0700
+@@ -158,7 +158,7 @@
+ fi
+
+ echo "##### Building packages and commands for $GOOS/$GOARCH."
+-CC=$CC_FOR_TARGET "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v std cmd
++CC="$CC_FOR_TARGET" "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v std cmd
+ echo
+
+ rm -f "$GOTOOLDIR"/go_bootstrap
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/gotooldir.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/gotooldir.patch
new file mode 100644
index 0000000..473a328
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/gotooldir.patch
@@ -0,0 +1,26 @@
+Index: go/src/go/build/build.go
+===================================================================
+--- go.orig/src/go/build/build.go
++++ go/src/go/build/build.go
+@@ -1388,7 +1388,7 @@ func init() {
+ }
+
+ // ToolDir is the directory containing build tools.
+-var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
++var ToolDir = envOr("GOTOOLDIR", filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH))
+
+ // IsLocalImport reports whether the import path is
+ // a local import path, like ".", "..", "./foo", or "../foo".
+Index: go/src/cmd/go/build.go
+===================================================================
+--- go.orig/src/cmd/go/build.go
++++ go/src/cmd/go/build.go
+@@ -1312,7 +1312,7 @@ func (b *builder) build(a *action) (err
+ }
+
+ cgoExe := tool("cgo")
+- if a.cgo != nil && a.cgo.target != "" {
++ if a.cgo != nil && a.cgo.target != "" && os.Getenv("GOTOOLDIR") == "" {
+ cgoExe = a.cgo.target
+ }
+ outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles)
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/split-host-and-target-build.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/split-host-and-target-build.patch
new file mode 100644
index 0000000..85fb240
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/split-host-and-target-build.patch
@@ -0,0 +1,58 @@
+Index: go/src/make.bash
+===================================================================
+--- go.orig/src/make.bash
++++ go/src/make.bash
+@@ -143,12 +143,23 @@ if [ "$1" = "--no-clean" ]; then
+ buildall=""
+ shift
+ fi
+-./cmd/dist/dist bootstrap $buildall $GO_DISTFLAGS -v # builds go_bootstrap
+-# Delay move of dist tool to now, because bootstrap may clear tool directory.
+-mv cmd/dist/dist "$GOTOOLDIR"/dist
+-echo
+
+-if [ "$GOHOSTARCH" != "$GOARCH" -o "$GOHOSTOS" != "$GOOS" ]; then
++do_host_build="yes"
++do_target_build="yes"
++if [ "$1" = "--target-only" ]; then
++ do_host_build="no"
++ shift
++elif [ "$1" = "--host-only" ]; then
++ do_target_build="no"
++ shift
++fi
++
++if [ "$do_host_build" = "yes" ]; then
++ ./cmd/dist/dist bootstrap $buildall $GO_DISTFLAGS -v # builds go_bootstrap
++ # Delay move of dist tool to now, because bootstrap may clear tool directory.
++ mv cmd/dist/dist "$GOTOOLDIR"/dist
++ echo
++
+ echo "##### Building packages and commands for host, $GOHOSTOS/$GOHOSTARCH."
+ # CC_FOR_TARGET is recorded as the default compiler for the go tool. When building for the host, however,
+ # use the host compiler, CC, from `cmd/dist/dist env` instead.
+@@ -157,11 +168,20 @@ if [ "$GOHOSTARCH" != "$GOARCH" -o "$GOH
+ echo
+ fi
+
+-echo "##### Building packages and commands for $GOOS/$GOARCH."
+-CC="$CC_FOR_TARGET" "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v std cmd
+-echo
++if [ "$do_target_build" = "yes" ]; then
++ GO_INSTALL="${GO_TARGET_INSTALL:-std cmd}"
++ echo "##### Building packages and commands for $GOOS/$GOARCH."
++ if [ "$GOHOSTOS" = "$GOOS" -a "$GOHOSTARCH" = "$GOARCH" -a "$do_host_build" = "yes" ]; then
++ rm -rf ./host-tools
++ mkdir ./host-tools
++ mv "$GOTOOLDIR"/* ./host-tools
++ GOTOOLDIR="$PWD/host-tools"
++ fi
++ GOTOOLDIR="$GOTOOLDIR" CC="$CC_FOR_TARGET" "$GOTOOLDIR"/go_bootstrap install $GO_FLAGS -gcflags "$GO_GCFLAGS" -ldflags "$GO_LDFLAGS" -v ${GO_INSTALL}
++ echo
+
+-rm -f "$GOTOOLDIR"/go_bootstrap
++ rm -f "$GOTOOLDIR"/go_bootstrap
++fi
+
+ if [ "$1" != "--no-banner" ]; then
+ "$GOTOOLDIR"/dist banner
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/syslog.patch b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/syslog.patch
new file mode 100644
index 0000000..ce82a4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-1.6/syslog.patch
@@ -0,0 +1,57 @@
+diff -r -u go/src/log/syslog/syslog.go /home/achang/GOCOPY/go/src/log/syslog/syslog.go
+--- go/src/log/syslog/syslog.go 2013-11-28 13:38:28.000000000 -0800
++++ /home/achang/GOCOPY/go/src/log/syslog/syslog.go 2014-10-03 11:44:37.710403200 -0700
+@@ -33,6 +33,9 @@
+ const severityMask = 0x07
+ const facilityMask = 0xf8
+
++var writeTimeout = 1 * time.Second
++var connectTimeout = 1 * time.Second
++
+ const (
+ // Severity.
+
+@@ -100,6 +103,7 @@
+ type serverConn interface {
+ writeString(p Priority, hostname, tag, s, nl string) error
+ close() error
++ setWriteDeadline(t time.Time) error
+ }
+
+ type netConn struct {
+@@ -273,7 +277,11 @@
+ nl = "\n"
+ }
+
+- err := w.conn.writeString(p, w.hostname, w.tag, msg, nl)
++ err := w.conn.setWriteDeadline(time.Now().Add(writeTimeout))
++ if err != nil {
++ return 0, err
++ }
++ err = w.conn.writeString(p, w.hostname, w.tag, msg, nl)
+ if err != nil {
+ return 0, err
+ }
+@@ -305,6 +313,10 @@
+ return n.conn.Close()
+ }
+
++func (n *netConn) setWriteDeadline(t time.Time) error {
++ return n.conn.SetWriteDeadline(t)
++}
++
+ // NewLogger creates a log.Logger whose output is written to
+ // the system log service with the specified priority. The logFlag
+ // argument is the flag set passed through to log.New to create
+diff -r -u go/src/log/syslog/syslog_unix.go /home/achang/GOCOPY/go/src/log/syslog/syslog_unix.go
+--- go/src/log/syslog/syslog_unix.go 2013-11-28 13:38:28.000000000 -0800
++++ /home/achang/GOCOPY/go/src/log/syslog/syslog_unix.go 2014-10-03 11:44:39.010403175 -0700
+@@ -19,7 +19,7 @@
+ logPaths := []string{"/dev/log", "/var/run/syslog"}
+ for _, network := range logTypes {
+ for _, path := range logPaths {
+- conn, err := net.Dial(network, path)
++ conn, err := net.DialTimeout(network, path, connectTimeout)
+ if err != nil {
+ continue
+ } else {
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-common.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-common.inc
new file mode 100644
index 0000000..f9587ea
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-common.inc
@@ -0,0 +1,20 @@
+SUMMARY = "Go programming language compiler"
+DESCRIPTION = " The Go programming language is an open source project to make \
+ programmers more productive. Go is expressive, concise, clean, and\
+ efficient. Its concurrency mechanisms make it easy to write programs\
+ that get the most out of multicore and networked machines, while its\
+ novel type system enables flexible and modular program construction.\
+ Go compiles quickly to machine code yet has the convenience of\
+ garbage collection and the power of run-time reflection. It's a\
+ fast, statically typed, compiled language that feels like a\
+ dynamically typed, interpreted language."
+HOMEPAGE = " http://golang.org/"
+LICENSE = "BSD-3-Clause"
+
+inherit go-osarchmap
+
+SRC_URI = "http://golang.org/dl/go${PV}.src.tar.gz"
+S = "${WORKDIR}/go"
+B = "${S}"
+
+INHIBIT_PACKAGE_DEBUG_SPLIT = "1"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc
new file mode 100644
index 0000000..613e9c7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross.inc
@@ -0,0 +1,49 @@
+inherit cross
+
+DEPENDS += "go-native"
+
+export GOHOSTOS = "${BUILD_GOOS}"
+export GOHOSTARCH = "${BUILD_GOARCH}"
+export GOOS = "${TARGET_GOOS}"
+export GOARCH = "${TARGET_GOARCH}"
+export GOARM = "${TARGET_GOARM}"
+export GOROOT_BOOTSTRAP = "${STAGING_LIBDIR_NATIVE}/go"
+export GOROOT_FINAL = "${libdir}/go"
+export CGO_ENABLED = "1"
+export CC_FOR_TARGET="${TARGET_PREFIX}gcc ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"
+export CXX_FOR_TARGET="${TARGET_PREFIX}g++ ${TARGET_CC_ARCH} --sysroot=${STAGING_DIR_TARGET}"
+CC = "${@d.getVar('BUILD_CC', True).strip()}"
+
+do_configure[noexec] = "1"
+
+do_compile() {
+ export GOBIN="${B}/bin"
+ rm -rf ${GOBIN} ${B}/pkg
+ mkdir ${GOBIN}
+
+ export TMPDIR=${WORKDIR}/build-tmp
+ mkdir -p ${WORKDIR}/build-tmp
+
+ cd src
+ ./make.bash --host-only
+}
+
+do_install() {
+ install -d ${D}${libdir}/go
+ cp -a ${B}/pkg ${D}${libdir}/go/
+ install -d ${D}${libdir}/go/src
+ (cd ${S}/src; for d in *; do \
+ [ -d $d ] && cp -a ${S}/src/$d ${D}${libdir}/go/src/; \
+ done)
+ install -d ${D}${bindir}
+ for f in ${B}/bin/*
+ do
+ install -m755 $f ${D}${bindir}
+ done
+}
+
+do_package[noexec] = "1"
+do_packagedata[noexec] = "1"
+do_package_write_ipk[noexec] = "1"
+do_package_write_deb[noexec] = "1"
+do_package_write_rpm[noexec] = "1"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross_1.5.bb b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross_1.5.bb
new file mode 100644
index 0000000..80b5a03
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross_1.5.bb
@@ -0,0 +1,2 @@
+require go-cross.inc
+require go-${PV}.inc
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross_1.6.bb b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross_1.6.bb
new file mode 100644
index 0000000..80b5a03
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-cross_1.6.bb
@@ -0,0 +1,2 @@
+require go-cross.inc
+require go-${PV}.inc
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc
new file mode 100644
index 0000000..8b4be9e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native.inc
@@ -0,0 +1,54 @@
+inherit native
+
+export GOOS = "${BUILD_GOOS}"
+export GOARCH = "${BUILD_GOARCH}"
+export GOROOT_FINAL = "${STAGING_LIBDIR_NATIVE}/go"
+export CGO_ENABLED = "1"
+
+do_configure[noexec] = "1"
+
+do_compile() {
+ export GOBIN="${B}/bin"
+ rm -rf ${GOBIN}
+ mkdir ${GOBIN}
+
+ export TMPDIR=${WORKDIR}/build-tmp
+ mkdir -p ${WORKDIR}/build-tmp
+
+ cd src
+ ./make.bash --host-only
+}
+
+
+make_wrapper() {
+ rm -f ${D}${bindir}/$2
+ cat <<END >${D}${bindir}/$2
+#!/bin/bash
+here=\`dirname \$0\`
+export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}"
+\$here/../lib/go/bin/$1 "\$@"
+END
+ chmod +x ${D}${bindir}/$2
+}
+
+do_install() {
+ install -d ${D}${libdir}/go
+ cp -a ${B}/pkg ${D}${libdir}/go/
+ install -d ${D}${libdir}/go/src
+ (cd ${S}/src; for d in *; do \
+ [ -d $d ] && cp -a ${S}/src/$d ${D}${libdir}/go/src/; \
+ done)
+ install -d ${D}${bindir} ${D}${libdir}/go/bin
+ for f in ${B}/bin/*
+ do
+ base=`basename $f`
+ install -m755 $f ${D}${libdir}/go/bin
+ make_wrapper $base $base
+ done
+}
+
+do_package[noexec] = "1"
+do_packagedata[noexec] = "1"
+do_package_write_ipk[noexec] = "1"
+do_package_write_deb[noexec] = "1"
+do_package_write_rpm[noexec] = "1"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native_1.4.bb b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native_1.4.bb
new file mode 100644
index 0000000..cf186e7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go-cross/go-native_1.4.bb
@@ -0,0 +1,2 @@
+require go-native.inc
+require go-${PV}.inc
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-capability_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-capability_git.bb
new file mode 100644
index 0000000..4f8f431
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-capability_git.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "Utilities for manipulating POSIX capabilities in Go."
+HOMEPAGE = "https://github.com/syndtr/gocapability"
+SECTION = "devel/go"
+LICENSE = "BSD-2-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=a7304f5073e7be4ba7bffabbf9f2bbca"
+
+SRCNAME = "gocapability"
+
+PKG_NAME = "github.com/syndtr/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "8e4cdcb3c22b40d5e330ade0b68cb2e2a3cf6f98"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_capability_sysroot_preprocess"
+
+go_capability_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-cli_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-cli_git.bb
new file mode 100644
index 0000000..21d01ac
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-cli_git.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "A small package for building command line apps in Go"
+HOMEPAGE = "https://github.com/codegangsta/cli"
+SECTION = "devel/go"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=ed9b539ed65d73926f30ff1f1587dc44"
+
+SRCNAME = "cli"
+
+PKG_NAME = "github.com/codegangsta/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "27ecc97192df1bf053a22b04463f2b51b8b8373e"
+PV = "1.1.0+git${SRCREV}"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_cli_sysroot_preprocess"
+
+go_cli_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-context_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-context_git.bb
new file mode 100644
index 0000000..15f6a8d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-context_git.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "A golang registry for global request variables."
+HOMEPAGE = "https://github.com/gorilla/context"
+SECTION = "devel/go"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=c50f6bd9c1e15ed0bad3bea18e3c1b7f"
+
+SRCNAME = "context"
+
+PKG_NAME = "github.com/gorilla/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "14f550f51af52180c2eefed15e5fd18d63c0a64a"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_context_sysroot_preprocess"
+
+go_context_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-dbus_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-dbus_git.bb
new file mode 100644
index 0000000..092bd50
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-dbus_git.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "Native Go bindings for D-Bus"
+HOMEPAGE = "https://github.com/godbus/dbus"
+SECTION = "devel/go"
+LICENSE = "BSD-2-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=b5ac622301483800715d770434e27e5b"
+
+SRCNAME = "dbus"
+
+PKG_NAME = "github.com/godbus/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "88765d85c0fdadcd98a54e30694fa4e4f5b51133"
+PV = "2+git${SRCREV}"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_dbus_sysroot_preprocess"
+
+go_dbus_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb
new file mode 100644
index 0000000..31d724c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-distribution-digest_git.bb
@@ -0,0 +1,34 @@
+DESCRIPTION = "The Docker toolset to pack, ship, store, and deliver content"
+HOMEPAGE = "https://github.com/docker/distribution"
+SECTION = "devel/go"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=d2794c0df5b907fdace235a619d80314"
+
+SRCNAME = "distribution"
+
+PKG_NAME = "github.com/docker/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "d957768537c5af40e4f4cd96871f7b2bde9e2923"
+
+S = "${WORKDIR}/git"
+
+do_unpackpost() {
+ rm -rf ${S}/[A-KM-Za-ce-z]* ${S}/doc*
+}
+
+addtask unpackpost after do_unpack before do_patch
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_distribution_digest_sysroot_preprocess"
+
+go_distribution_digest_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-fsnotify_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-fsnotify_git.bb
new file mode 100644
index 0000000..e18f574
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-fsnotify_git.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "File system notifications for Go."
+HOMEPAGE = "https://github.com/go-fsnotify/fsnotify"
+SECTION = "devel/go"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=c38914c9a7ab03bb2b96d4baaee10769"
+
+SRCNAME = "fsnotify"
+
+PKG_NAME = "github.com/go-fsnotify/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "ca50e738d35a862c379baf8fffbc3bfd080b3cff"
+PV = "1.0.4+git${SRCREV}"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_fsnotify_sysroot_preprocess"
+
+go_fsnotify_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-libtrust_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-libtrust_git.bb
new file mode 100644
index 0000000..f2acfb4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-libtrust_git.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "Primitives for identity and authorization"
+HOMEPAGE = "https://github.com/docker/libtrust"
+SECTION = "devel/go"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=435b266b3899aa8a959f17d41c56def8"
+
+SRCNAME = "libtrust"
+
+PKG_NAME = "github.com/docker/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "230dfd18c2326f1e9d08238710e67a1040187d07"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_libtrust_sysroot_preprocess"
+
+go_libtrust_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-logrus_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-logrus_git.bb
new file mode 100644
index 0000000..082c1a6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-logrus_git.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "Structured, pluggable logging for Go."
+HOMEPAGE = "https://github.com/Sirupsen/logrus"
+SECTION = "devel/go"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=8dadfef729c08ec4e631c4f6fc5d43a0"
+
+SRCNAME = "logrus"
+
+PKG_NAME = "github.com/Sirupsen/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "3fc34d061b9c78a70db853c7cb6b0576b6d4f32d"
+PV = "0.7.1+git${SRCREV}"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_logrus_sysroot_preprocess"
+
+go_logrus_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-mux_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-mux_git.bb
new file mode 100644
index 0000000..7a2025a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-mux_git.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "A powerful URL router and dispatcher for golang."
+HOMEPAGE = "https://github.com/gorilla/mux"
+SECTION = "devel/go"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=c50f6bd9c1e15ed0bad3bea18e3c1b7f"
+
+SRCNAME = "mux"
+
+PKG_NAME = "github.com/gorilla/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "136d54f81f00414c45c3c68dd47e98cc97519c5e"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_mux_sysroot_preprocess"
+
+go_mux_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-patricia_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-patricia_git.bb
new file mode 100644
index 0000000..18c188d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-patricia_git.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "A generic patricia trie (also called radix tree) implemented in Go (Golang)"
+HOMEPAGE = "https://github.com/tchap/go-patricia"
+SECTION = "devel/go"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=9949b99212edd6b1e24ce702376c3baf"
+
+SRCNAME = "go-patricia"
+
+PKG_NAME = "github.com/tchap/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "f64d0a63cd3363481c898faa9339de04d12213f9"
+PV = "1.0.1+git${SRCPV}"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_patricia_sysroot_preprocess"
+
+go_patricia_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-pty_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-pty_git.bb
new file mode 100644
index 0000000..bc94b31
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-pty_git.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "PTY interface for Go"
+HOMEPAGE = "https://github.com/kr/pty"
+SECTION = "devel/go"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://License;md5=93958070863d769117fa33b129020050"
+
+SRCNAME = "pty"
+
+PKG_NAME = "github.com/kr/${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "05017fcccf23c823bfdea560dcc958a136e54fb7"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_pty_sysroot_preprocess"
+
+go_pty_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb b/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb
new file mode 100644
index 0000000..358e6bb
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/go/go-systemd_git.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "Go bindings to systemd socket activation, journal, D-Bus, and unit files"
+HOMEPAGE = "https://github.com/coreos/go-systemd"
+SECTION = "devel/go"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=19cbd64715b51267a47bf3750cc6a8a5"
+
+SRCNAME = "systemd"
+
+PKG_NAME = "github.com/coreos/go-${SRCNAME}"
+SRC_URI = "git://${PKG_NAME}.git"
+
+SRCREV = "f743bc15d6bddd23662280b4ad20f7c874cdd5ad"
+PV = "2+git${SRCREV}"
+
+S = "${WORKDIR}/git"
+
+do_install() {
+ install -d ${D}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${S}/* ${D}${prefix}/local/go/src/${PKG_NAME}/
+}
+
+SYSROOT_PREPROCESS_FUNCS += "go_systemd_sysroot_preprocess"
+
+go_systemd_sysroot_preprocess () {
+ install -d ${SYSROOT_DESTDIR}${prefix}/local/go/src/${PKG_NAME}
+ cp -r ${D}${prefix}/local/go/src/${PKG_NAME} ${SYSROOT_DESTDIR}${prefix}/local/go/src/$(dirname ${PKG_NAME})
+}
+
+FILES_${PN} += "${prefix}/local/go/src/${PKG_NAME}/*"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/gunicorn_19.1.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/gunicorn_19.1.1.bb
new file mode 100644
index 0000000..bbe03ea
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/gunicorn_19.1.1.bb
@@ -0,0 +1,15 @@
+SUMMARY = "WSGI HTTP Server for UNIX"
+DESCRIPTION = "\
+ Gunicorn ‘Green Unicorn’ is a Python WSGI HTTP Server for UNIX. It’s \
+ a pre-fork worker model ported from Ruby’s Unicorn project. The \
+ Gunicorn server is broadly compatible with various web frameworks, \
+ simply implemented, light on server resource usage, and fairly speedy. \
+ "
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=19a2e253a273e390cd1b91d19b6ee236"
+
+SRC_URI = "https://pypi.python.org/packages/source/g/gunicorn/${PN}-${PV}.tar.gz"
+SRC_URI[md5sum] = "eaa72bff5341c05169b76ce3dcbb8140"
+SRC_URI[sha256sum] = "82715511fb6246fad4ba66d812eb93416ae8371b464fa88bf3867c9c177daa14"
+
+inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-backports-lzma/fix_paths.patch b/import-layers/meta-virtualization/recipes-devtools/python/python-backports-lzma/fix_paths.patch
new file mode 100644
index 0000000..c2b374f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-backports-lzma/fix_paths.patch
@@ -0,0 +1,17 @@
+---
+ setup.py | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/setup.py
++++ b/setup.py
+@@ -32,8 +32,8 @@
+ extens = [Extension('backports/lzma/_lzma',
+ ['backports/lzma/_lzmamodule.c'],
+ libraries = ['lzma'],
+- include_dirs = [os.path.join(home, 'include'), '/opt/local/include', '/usr/local/include'],
+- library_dirs = [os.path.join(home, 'lib'), '/opt/local/lib', '/usr/local/lib']
++ include_dirs = [],
++ library_dirs = []
+ )]
+
+ descr = "Backport of Python 3.3's 'lzma' module for XZ/LZMA compressed files."
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-backports-lzma_0.0.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-backports-lzma_0.0.3.bb
new file mode 100644
index 0000000..a3586c0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-backports-lzma_0.0.3.bb
@@ -0,0 +1,32 @@
+HOMEPAGE = "https://github.com/peterjc/backports.lzma"
+SUMMARY = "\
+ Backport of Python 3.3's 'lzma' module for XZ/LZMA compressed files."
+DESCRIPTION = "\
+ This is a backport of the 'lzma' module included in Python 3.3 or later \
+ by Nadeem Vawda and Per Oyvind Karlsen, which provides a Python wrapper \
+ for XZ Utils (aka LZMA Utils v2) by Igor Pavlov. \
+ . \
+ In order to compile this, you will need to install XZ Utils from \
+ http://tukaani.org/xz/ \
+ "
+SECTION = "devel/python"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://PKG-INFO;md5=db4345b3b9524aabc8fe8c65f235c6b2"
+
+SRC_URI[md5sum] = "c3d109746aefa86268e500c07d7e8e0f"
+SRC_URI[sha256sum] = "bac58aec8d39ac3d22250840fb24830d0e4a0ef05ad8f3f09172dc0cc80cdbca"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+DEPENDS += "xz"
+
+SRCNAME = "backports.lzma"
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI = "\
+ https://pypi.python.org/packages/source/b/backports.lzma/${SRCNAME}-${PV}.tar.gz \
+ file://fix_paths.patch \
+ "
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-blinker_1.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-blinker_1.3.bb
new file mode 100644
index 0000000..24e19b5
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-blinker_1.3.bb
@@ -0,0 +1,22 @@
+HOMEPAGE = "https://pypi.python.org/pypi/blinker"
+SUMMARY = "Fast, simple object-to-object and broadcast signaling"
+DESCRIPTION = " \
+ Blinker provides a fast dispatching system that allows any number of \
+ interested parties to subscribe to events, or “signals”. \
+ . \
+ Signal receivers can subscribe to specific senders or receive signals \
+ sent by any sender. \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=8baf1d53a00de619f60052e4752a89af"
+
+SRCNAME = "blinker"
+SRC_URI = "https://pypi.python.org/packages/source/b/blinker/${SRCNAME}-${PV}.tar.gz"
+SRC_URI[md5sum] = "66e9688f2d287593a0e698cd8a5fbc57"
+SRC_URI[sha256sum] = "6811010809262261e41ab7b92f3f6d23f35cf816fbec2bc05077992eebec6e2f"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-boto_2.34.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-boto_2.34.0.bb
new file mode 100644
index 0000000..f94f324
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-boto_2.34.0.bb
@@ -0,0 +1,25 @@
+HOMEPAGE = "https://github.com/boto/boto"
+SUMMARY = "Amazon Web Services API"
+DESCRIPTION = "\
+ Boto is a Python package that provides interfaces to Amazon Web Services. \
+ Currently, all features work with Python 2.6 and 2.7. Work is under way to \
+ support Python 3.3+ in the same codebase. Modules are being ported one at \
+ a time with the help of the open source community, so please check below \
+ for compatibility with Python 3.3+. \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://setup.py;md5=182ef81236d3fac2c6ed8e8d3c988ec8"
+
+PR = "r0"
+SRCNAME = "boto"
+
+SRC_URI = "https://pypi.python.org/packages/source/b/boto/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "5556223d2d0cc4d06dd4829e671dcecd"
+SRC_URI[sha256sum] = "33baab022ecb803414ad0d6cf4041d010cfc2755ff8acc3bea7b32e77ba98be0"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-bugsnag_2.0.2.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-bugsnag_2.0.2.bb
new file mode 100644
index 0000000..edf880f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-bugsnag_2.0.2.bb
@@ -0,0 +1,26 @@
+HOMEPAGE = "https://bugsnag.com/"
+SUMMARY = "Automatic error monitoring for django, flask, etc."
+DESCRIPTION = "\
+ The official Python notifier for `Bugsnag <https://bugsnag.com/>`_. \
+ Provides support for automatically capturing and sending exceptions \
+ in your Django and other Python apps to Bugsnag, to help you find \
+ and solve your bugs as fast as possible. \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://PKG-INFO;md5=f6df6ab9f1b615a140ebb2a48f61bddc"
+
+PR = "r0"
+SRCNAME = "bugsnag"
+
+SRC_URI = "https://pypi.python.org/packages/source/b/bugsnag/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "e16360d86979890892cd388635f213e7"
+SRC_URI[sha256sum] = "093934b3cd1d36ba2b89cfe1673b14ba59043417fe500a02dbf6de0df43ea962"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+DEPENDS += "python-webob python-flask python-blinker"
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-docker-registry-core_2.0.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-docker-registry-core_2.0.3.bb
new file mode 100644
index 0000000..7aa6825
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-docker-registry-core_2.0.3.bb
@@ -0,0 +1,29 @@
+HOMEPAGE = "https://pypi.python.org/pypi/docker-registry-core"
+SUMMARY = "Docker registry core package"
+DESCRIPTION = "core package for docker-registry (drivers) developers"
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=3b83ef96387f14655fc854ddc3c6bd57"
+
+SRCNAME = "docker-registry-core"
+
+SRC_URI = "https://pypi.python.org/packages/source/d/docker-registry-core/${SRCNAME}-${PV}.tar.gz"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+SRC_URI[md5sum] = "610ef9395f2e9a2f91c68d13325fce7b"
+SRC_URI[sha256sum] = "347e804f1f35b28dbe27bf8d7a0b630fca29d684032139bf26e3940572360360"
+
+inherit setuptools
+
+DEPENDS += "\
+ python-distribute \
+ python-boto (= 2.34.0) \
+ python-redis (= 2.10.3) \
+ python-simplejson (= 3.6.2) \
+ "
+
+# boto 2.34.0
+# redis 2.10.3
+# simplejson 3.6.2
+# setuptools 5.8
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-flask-cors_1.10.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-flask-cors_1.10.3.bb
new file mode 100644
index 0000000..c39e9b2
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-flask-cors_1.10.3.bb
@@ -0,0 +1,22 @@
+HOMEPAGE = "https://pypi.python.org/pypi/Flask-Cors/1.10.3"
+SUMMARY = "A Flask extension adding a decorator for CORS support"
+DESCRIPTION = "\
+ A Flask extension for handling Cross Origin Resource Sharing (CORS), making cross-origin AJAX possible \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=4784781a5ee9fed9c50272e733e07685"
+
+DEPENDS += "python-six python-flask"
+
+PR = "r0"
+SRCNAME = "Flask-Cors"
+
+SRC_URI = "https://pypi.python.org/packages/source/F/Flask-Cors/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "4f3c75ace0f724d1de167bd73745c965"
+SRC_URI[sha256sum] = "9e6927aa0a46f314bca0ec63eb871cee898a162adfdd5b65224db7a008287423"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-flask_0.10.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-flask_0.10.1.bb
new file mode 100644
index 0000000..263e53d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-flask_0.10.1.bb
@@ -0,0 +1,20 @@
+DESCRIPTION = "A microframework based on Werkzeug, Jinja2 and good intentions"
+HOMEPAGE = "https://pypi.python.org/pypi/Flask/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=79aa8b7bc4f781210d6b5c06d6424cb0"
+
+PR = "r0"
+SRCNAME = "Flask"
+
+SRC_URI = "https://pypi.python.org/packages/source/F/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "378670fe456957eb3c27ddaef60b2b24"
+SRC_URI[sha256sum] = "4c83829ff83d408b5e1d4995472265411d2c414112298f2eb4b359d9e4563373"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+CLEANBROKEN = "1"
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-gevent/gevent-allow-ssl-v2-or-v3-certificates.patch b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent/gevent-allow-ssl-v2-or-v3-certificates.patch
new file mode 100644
index 0000000..623d04f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent/gevent-allow-ssl-v2-or-v3-certificates.patch
@@ -0,0 +1,29 @@
+From c2dc97478fcc3757e09d5d2997391960a8351d53 Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Sun, 29 Mar 2015 22:34:28 -0400
+Subject: [PATCH] gevent: allow ssl v2 or v3 certificates
+
+Work around an issue with python 2.7 not always having SSLv3 available
+by allowing v2 or v3 certificates.
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+---
+ gevent/ssl.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gevent/ssl.py b/gevent/ssl.py
+index ce6434718d1b..93c0d642da5f 100644
+--- a/gevent/ssl.py
++++ b/gevent/ssl.py
+@@ -383,7 +383,7 @@ def wrap_socket(sock, keyfile=None, certfile=None,
+ ciphers=ciphers)
+
+
+-def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
++def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv23, ca_certs=None):
+ """Retrieve the certificate from the server at the specified address,
+ and return it as a PEM-encoded string.
+ If 'ca_certs' is specified, validate the server cert against it.
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-gevent/libev-conf.patch b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent/libev-conf.patch
new file mode 100644
index 0000000..283705f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent/libev-conf.patch
@@ -0,0 +1,19 @@
+Due to differences in library locations, the cross compile test can fail because it can't run
+the conftest binary (dynamically linked). Building it statically instead.
+
+Signed-off-by: Amy Fong <amy.fong@windriver.com>
+---
+ libev/configure | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/libev/configure
++++ b/libev/configure
+@@ -2730,7 +2730,7 @@
+ ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_link='$CC -static -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ if test -n "$ac_tool_prefix"; then
+ # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb
new file mode 100644
index 0000000..47f1267
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-gevent_1.0.1.bb
@@ -0,0 +1,33 @@
+HOMEPAGE = "http://www.gevent.org"
+SUMMARY = "A coroutine-based Python networking library"
+DESCRIPTION = "\
+ gevent is a coroutine-based Python networking library that uses greenlet \
+ to provide a high-level synchronous API on top of the libevent event \
+ loop. \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=2dbb33d00e1fd31c7041460a81ac0bd2"
+DEPENDS += "python-greenlet libevent"
+RDEPENDS_${PN} += "python-greenlet python-mime python-pprint python-re"
+
+SRCNAME = "gevent"
+
+SRC_URI = "http://pypi.python.org/packages/source/g/gevent/${SRCNAME}-${PV}.tar.gz"
+SRC_URI[md5sum] = "7b952591d1a0174d6eb6ac47bd975ab6"
+SRC_URI[sha256sum] = "4627e215d058f71d95e6b26d9e7be4c263788a4756bd2858a93775f6c072df43"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+SRC_URI += "file://libev-conf.patch"
+SRC_URI += "file://gevent-allow-ssl-v2-or-v3-certificates.patch"
+
+# The python-gevent has no autoreconf ability
+# and the logic for detecting a cross compile is flawed
+# so always force a cross compile
+do_configure_append() {
+ sed -i -e 's/^cross_compiling=no/cross_compiling=yes/' ${S}/libev/configure
+}
\ No newline at end of file
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb
new file mode 100644
index 0000000..3e229f8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-itsdangerous_0.24.bb
@@ -0,0 +1,22 @@
+DESCRIPTION = "Various helpers to pass trusted data to untrusted environments and back"
+HOMEPAGE = "https://pypi.python.org/pypi/itsdangerous/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=b61841e2bf5f07884148e2a6f1bcab0c"
+
+PR = "r0"
+SRCNAME = "itsdangerous"
+
+SRC_URI = "https://pypi.python.org/packages/source/i/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "a3d55aa79369aef5345c036a8a26307f"
+SRC_URI[sha256sum] = "cbb3fcf8d3e33df861709ecaf89d9e6629cff0a217bc2848f1b41cd30d360519"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+RDEPENDS_${PN} += "python-json python-netclient python-zlib python-datetime python-lang python-crypt"
+
+CLEANBROKEN = "1"
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto/m2crypto-Fix-build-with-SWIG-3.0.5.patch b/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto/m2crypto-Fix-build-with-SWIG-3.0.5.patch
new file mode 100644
index 0000000..c408595
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto/m2crypto-Fix-build-with-SWIG-3.0.5.patch
@@ -0,0 +1,158 @@
+From 8430e7202407fb1a0a104b0decdcc9da9e41a52b Mon Sep 17 00:00:00 2001
+From: Jackie Huang <jackie.huang@windriver.com>
+Date: Mon, 4 Apr 2016 23:28:15 -0400
+Subject: [PATCH] Fix build with SWIG 3.0.5
+
+See analysis and previous patches in
+https://github.com/martinpaljak/M2Crypto/issues/60 and
+https://github.com/swig/swig/issues/344, in particular this adds the
+build machinery to patch
+https://github.com/martinpaljak/M2Crypto/issues/60#issuecomment-75735489
+
+Fixes #47
+
+Author: Miloslav Trmac <mitr@redhat.com>
+
+Upstream-Status: Backport
+
+Signed-off-by: Jackie Huang <jackie.huang@windriver.com>
+---
+ M2Crypto/__init__.py | 4 ++--
+ M2Crypto/m2.py | 2 +-
+ SWIG/_lib.i | 4 ++++
+ SWIG/_pkcs7.i | 1 +
+ setup.py | 24 ++++++++++++++++++++++++
+ 5 files changed, 32 insertions(+), 3 deletions(-)
+
+diff --git a/M2Crypto/__init__.py b/M2Crypto/__init__.py
+index 647e057..280af94 100644
+--- a/M2Crypto/__init__.py
++++ b/M2Crypto/__init__.py
+@@ -19,7 +19,7 @@ Copyright 2008-2011 Heikki Toivonen. All rights reserved.
+ version_info = (0, 22)
+ version = '.'.join([str(_v) for _v in version_info])
+
+-import __m2crypto
++import _m2crypto
+ import m2
+ import ASN1
+ import AuthCookie
+@@ -47,4 +47,4 @@ import m2xmlrpclib
+ import threading
+ import util
+
+-__m2crypto.lib_init()
++_m2crypto.lib_init()
+diff --git a/M2Crypto/m2.py b/M2Crypto/m2.py
+index e4bb695..822143f 100644
+--- a/M2Crypto/m2.py
++++ b/M2Crypto/m2.py
+@@ -25,7 +25,7 @@ Portions created by Open Source Applications Foundation (OSAF) are
+ Copyright (C) 2004 OSAF. All Rights Reserved.
+ """
+
+-from __m2crypto import *
++from _m2crypto import *
+ lib_init()
+
+
+diff --git a/SWIG/_lib.i b/SWIG/_lib.i
+index 0d40698..6cc1a44 100644
+--- a/SWIG/_lib.i
++++ b/SWIG/_lib.i
+@@ -66,6 +66,7 @@ int ssl_verify_callback(int ok, X509_STORE_CTX *ctx) {
+ int cret;
+ int new_style_callback = 0, warning_raised_exception=0;
+ PyGILState_STATE gilstate;
++ PyObject *self = NULL; /* bug in SWIG_NewPointerObj as of 3.0.5 */
+
+ ssl = (SSL *)X509_STORE_CTX_get_app_data(ctx);
+
+@@ -151,6 +152,7 @@ int ssl_verify_callback(int ok, X509_STORE_CTX *ctx) {
+ void ssl_info_callback(const SSL *s, int where, int ret) {
+ PyObject *argv, *retval, *_SSL;
+ PyGILState_STATE gilstate;
++ PyObject *self = NULL; /* bug in SWIG_NewPointerObj as of 3.0.5 */
+
+ gilstate = PyGILState_Ensure();
+
+@@ -170,6 +172,7 @@ DH *ssl_set_tmp_dh_callback(SSL *ssl, int is_export, int keylength) {
+ PyObject *argv, *ret, *_ssl;
+ DH *dh;
+ PyGILState_STATE gilstate;
++ PyObject *self = NULL; /* bug in SWIG_NewPointerObj as of 3.0.5 */
+
+ gilstate = PyGILState_Ensure();
+
+@@ -193,6 +196,7 @@ RSA *ssl_set_tmp_rsa_callback(SSL *ssl, int is_export, int keylength) {
+ PyObject *argv, *ret, *_ssl;
+ RSA *rsa;
+ PyGILState_STATE gilstate;
++ PyObject *self = NULL; /* bug in SWIG_NewPointerObj as of 3.0.5 */
+
+ gilstate = PyGILState_Ensure();
+
+diff --git a/SWIG/_pkcs7.i b/SWIG/_pkcs7.i
+index 22d791a..20dfbaf 100644
+--- a/SWIG/_pkcs7.i
++++ b/SWIG/_pkcs7.i
+@@ -157,6 +157,7 @@ PyObject *smime_read_pkcs7(BIO *bio) {
+ BIO *bcont = NULL;
+ PKCS7 *p7;
+ PyObject *tuple, *_p7, *_BIO;
++ PyObject *self = NULL; /* bug in SWIG_NewPointerObj as of 3.0.5 */
+
+ if (BIO_method_type(bio) == BIO_TYPE_MEM) {
+ /* OpenSSL FAQ explains that this is needed for mem BIO to return EOF,
+diff --git a/setup.py b/setup.py
+index bac6f9f..f59dc18 100644
+--- a/setup.py
++++ b/setup.py
+@@ -19,6 +19,7 @@ from setuptools.command import build_ext
+
+ from distutils.core import Extension
+ from distutils.spawn import find_executable
++from distutils.file_util import copy_file
+
+
+ class _M2CryptoBuildExt(build_ext.build_ext):
+@@ -77,6 +78,15 @@ class _M2CryptoBuildExt(build_ext.build_ext):
+ [opensslIncludeDir, os.path.join(opensslIncludeDir, "openssl")]]
+ self.swig_opts.append('-includeall')
+ self.swig_opts.append('-modern')
++ self.swig_opts.append('-builtin')
++
++ # These two lines are a workaround for
++ # http://bugs.python.org/issue2624 , hard-coding that we are only
++ # building a single extension with a known path; a proper patch to
++ # distutils would be in the run phase, when extension name and path are
++ # known.
++ self.swig_opts.append('-outdir')
++ self.swig_opts.append(os.path.join(self.build_lib, 'M2Crypto'))
+
+ # Fedora does hat tricks.
+ if platform.linux_distribution()[0] in ['Fedora', 'CentOS']:
+@@ -98,6 +108,20 @@ class _M2CryptoBuildExt(build_ext.build_ext):
+
+ self.library_dirs += [os.path.join(self.openssl, opensslLibraryDir)]
+
++ def run(self):
++ '''Overloaded build_ext implementation to allow inplace=1 to work,
++ which is needed for (python setup.py test).'''
++ # This is another workaround for http://bugs.python.org/issue2624 + the
++ # corresponding lack of support in setuptools' test command. Note that
++ # just using self.inplace in finalize_options() above does not work
++ # because swig is not rerun if the __m2crypto.so extension exists.
++ # Again, hard-coding our extension name and location.
++ build_ext.build_ext.run(self)
++ if self.inplace:
++ copy_file(os.path.join(self.build_lib, 'M2Crypto', '_m2crypto.py'),
++ os.path.join('M2Crypto', '_m2crypto.py'),
++ verbose=self.verbose, dry_run=self.dry_run)
++
+ if sys.platform == 'darwin':
+ my_extra_compile_args = ["-Wno-deprecated-declarations"]
+ else:
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb
new file mode 100644
index 0000000..95d6eec
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-m2crypto_0.22.3.bb
@@ -0,0 +1,62 @@
+HOMEPAGE = "http://chandlerproject.org/bin/view/Projects/MeTooCrypto"
+SUMMARY = "A Python crypto and SSL toolkit"
+DESCRIPTION = "\
+ M2Crypto is the most complete Python wrapper for OpenSSL featuring RSA, \
+ DSA, DH, EC, HMACs, message digests, symmetric ciphers (including \
+ AES); SSL functionality to implement clients and servers; HTTPS \
+ extensions to Python's httplib, urllib, and xmlrpclib; unforgeable \
+ HMAC'ing AuthCookies for web session management; FTP/TLS client and \
+ server; S/MIME; ZServerSSL: A HTTPS server for Zope and ZSmime: An \
+ S/MIME messenger for Zope. M2Crypto can also be used to provide SSL \
+ for Twisted. \
+ "
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://PKG-INFO;md5=0ccca7097c1d29fa42e75e9c15c6ff2e"
+
+SRCNAME = "M2Crypto"
+SRC_URI = "http://pypi.python.org/packages/source/M/M2Crypto/${SRCNAME}-${PV}.tar.gz \
+ file://m2crypto-Fix-build-with-SWIG-3.0.5.patch \
+"
+
+SRC_URI[md5sum] = "573f21aaac7d5c9549798e72ffcefedd"
+SRC_URI[sha256sum] = "6071bfc817d94723e9b458a010d565365104f84aa73f7fe11919871f7562ff72"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+DEPENDS += "openssl swig-native"
+
+DISTUTILS_BUILD_ARGS += "build_ext -I${STAGING_INCDIR}"
+
+# (duplicate 'inherit setuptools' removed -- the class is already inherited earlier in this recipe)
+
+SWIG_FEATURES_x86-64 = "-D__x86_64__"
+SWIG_FEATURES ?= ""
+export SWIG_FEATURES
+
+# Get around a problem with swig, but only if the
+# multilib header file exists.
+#
+do_compile_prepend() {
+ sed -i -e 's/self.add_multiarch_paths.*$/# &/;' ${S}/setup.py
+ sed -i -e 's/opensslIncludeDir = .*$/opensslIncludeDir = os.getenv("STAGING_INCDIR")/;' ${S}/setup.py
+ sed -i -e 's/opensslLibraryDir = .*$/opensslLibraryDir = os.getenv("STAGING_LIBDIR")/;' ${S}/setup.py
+
+ if [ "${SITEINFO_BITS}" = "64" ];then
+ bit="64"
+ else
+ bit="32"
+ fi
+
+ if [ -e ${STAGING_INCDIR}/openssl/opensslconf-${bit}.h ]; then
+ for i in SWIG/_ec.i SWIG/_evp.i; do
+ sed -i -e "s/opensslconf.*\./opensslconf-${bit}\./" "$i"
+ done
+ elif [ -e ${STAGING_INCDIR}/openssl/opensslconf-n${bit}.h ] ;then
+ for i in SWIG/_ec.i SWIG/_evp.i; do
+ sed -i -e "s/opensslconf.*\./opensslconf-n${bit}\./" "$i"
+ done
+ fi
+}
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-newrelic_2.22.0.19.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-newrelic_2.22.0.19.bb
new file mode 100644
index 0000000..3891824
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-newrelic_2.22.0.19.bb
@@ -0,0 +1,26 @@
+HOMEPAGE = "http://www.newrelic.com"
+SUMMARY = "New Relic Python Agent"
+DESCRIPTION = "\
+ Python agent for the New Relic web application performance monitoring \
+ service. Check the release notes for what has changed in this version. \
+ "
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause & MIT & Python-2.0 & BSD-2-Clause & NewRelic"
+LIC_FILES_CHKSUM = "file://newrelic/LICENSE;md5=0f6cc160a8ed6759faa408a30b6ac978"
+
+PR = "r0"
+SRCNAME = "newrelic"
+
+SRC_URI = "https://pypi.python.org/packages/source/n/newrelic/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "f8c9bf996d040a11847d14682b290eff"
+SRC_URI[sha256sum] = "aa8869413c21aff441a77582df1e0fdc0f67342760eb7560d33ed3bbed7edf7b"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+FILES_${PN}-dbg += "\
+ ${PYTHON_SITEPACKAGES_DIR}/newrelic-${PV}/newrelic/*/.debug \
+ ${PYTHON_SITEPACKAGES_DIR}/newrelic-${PV}/newrelic/packages/*/.debug/ \
+ "
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb
new file mode 100644
index 0000000..cb1db8c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-pyyaml_3.11.bb
@@ -0,0 +1,30 @@
+HOMEPAGE = "http://www.pyyaml.org"
+SUMMARY = "Python support for YAML"
+DESCRIPTION = "\
+ YAML is a data serialization format designed for human readability \
+ and interaction with scripting languages. PyYAML is a YAML parser \
+ and emitter for Python. \
+ . \
+ PyYAML features a complete YAML 1.1 parser, Unicode support, pickle \
+ support, capable extension API, and sensible error messages. PyYAML \
+ supports standard YAML tags and provides Python-specific tags that \
+ allow to represent an arbitrary Python object. \
+ . \
+ PyYAML is applicable for a broad range of tasks from complex \
+ configuration files to object serialization and persistence. \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=6015f088759b10e0bc2bf64898d4ae17"
+
+SRCNAME = "PyYAML"
+SRC_URI = "http://pyyaml.org/download/pyyaml/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "f50e08ef0fe55178479d3a618efe21db"
+SRC_URI[sha256sum] = "c36c938a872e5ff494938b33b14aaa156cb439ec67548fcab3535bb78b0846e8"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+DEPENDS += "libyaml python-cython-native"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-redis_2.10.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-redis_2.10.3.bb
new file mode 100644
index 0000000..9eda8a5
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-redis_2.10.3.bb
@@ -0,0 +1,20 @@
+HOMEPAGE = "https://pypi.python.org/pypi/redis/"
+SUMMARY = "Python client for Redis key-value store"
+DESCRIPTION = "The Python interface to the Redis key-value store."
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=51d9ad56299ab60ba7be65a621004f27"
+
+PR = "r0"
+SRCNAME = "redis"
+
+SRC_URI = "https://pypi.python.org/packages/source/r/redis/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "7619221ad0cbd124a5687458ea3f5289"
+SRC_URI[sha256sum] = "a4fb37b02860f6b1617f6469487471fd086dd2d38bbce640c2055862b9c4019c"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+RDEPENDS_${PN} = "redis"
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb
new file mode 100644
index 0000000..187f97a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-requests_2.8.1.bb
@@ -0,0 +1,28 @@
+HOMEPAGE = "http://python-requests.org"
+SUMMARY = "Python HTTP for Humans."
+DESCRIPTION = "\
+ Requests is an Apache2 Licensed HTTP library, written in Python, \
+ for human beings. \
+ . \
+ Most existing Python modules for sending HTTP requests are extremely \
+ verbose and cumbersome. Python's builtin urllib2 module provides most \
+ of the HTTP capabilities you should need, but the api is thoroughly \
+ broken. It requires an enormous amount of work (even method overrides) \
+ to perform the simplest of tasks. \
+ . \
+ Things shouldn't be this way. Not in Python \
+ "
+SECTION = "devel/python"
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=58c7e163c9f8ee037246da101c6afd1e"
+
+SRCNAME = "requests"
+
+SRC_URI = "http://pypi.python.org/packages/source/r/requests/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "a27ea3d72d7822906ddce5e252d6add9"
+SRC_URI[sha256sum] = "84fe8d5bf4dcdcc49002446c47a146d17ac10facf00d9086659064ac43b6c25b"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb
new file mode 100644
index 0000000..39dfce6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-simplejson_3.7.3.bb
@@ -0,0 +1,31 @@
+HOMEPAGE = "http://cheeseshop.python.org/pypi/simplejson"
+SUMMARY = "Simple, fast, extensible JSON encoder/decoder for Python"
+DESCRIPTION = "\
+ JSON <http://json.org> encoder and decoder for Python 2.5+ \
+ and Python 3.3+. It is pure Python code with no dependencies, \
+ but includes an optional C extension for a serious speed boost \
+ "
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=c6338d7abd321c0b50a2a547e441c52e"
+PR = "r0"
+
+SRCNAME = "simplejson"
+
+SRC_URI = "https://pypi.python.org/packages/source/s/simplejson/${SRCNAME}-${PV}.tar.gz"
+SRC_URI[md5sum] = "117346e5ee4ed4434ffe485f8e58f5ed"
+SRC_URI[sha256sum] = "63d7f7b14a20f29f74325a69e6db45925eaf6e3a003eab46c0234fd050a8c93f"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+RDEPENDS_${PN} = "\
+ python-core \
+ python-re \
+ python-io \
+ python-netserver \
+ python-numbers \
+"
+
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb
new file mode 100644
index 0000000..a84a4c4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-six_1.10.0.bb
@@ -0,0 +1,17 @@
+DESCRIPTION = "Python 2 and 3 compatibility utilities"
+HOMEPAGE = "http://pypi.python.org/pypi/six/"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=6f00d4a50713fa859858dd9abaa35b21"
+
+SRCNAME = "six"
+
+SRC_URI = "https://pypi.python.org/packages/source/s/${SRCNAME}/${SRCNAME}-${PV}.tar.gz \
+"
+
+SRC_URI[md5sum] = "34eed507548117b2ab523ab14b2f8b55"
+SRC_URI[sha256sum] = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.3.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.3.1.bb
new file mode 100644
index 0000000..fe8ad94
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-sphinx_1.3.1.bb
@@ -0,0 +1,17 @@
+DESCRIPTION = "Python documentation generator"
+HOMEPAGE = "http://sphinx-doc.org/"
+SECTION = "devel/python"
+LICENSE = "BSD"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=6dd095eaa1e7a662b279daf80ecad7e6"
+
+PR = "r0"
+SRCNAME = "Sphinx"
+
+SRC_URI = "http://pypi.python.org/packages/source/S/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "8786a194acf9673464c5455b11fd4332"
+SRC_URI[sha256sum] = "1a6e5130c2b42d2de301693c299f78cc4bd3501e78b610c08e45efc70e2b5114"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-sqlalchemy_1.0.8.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-sqlalchemy_1.0.8.bb
new file mode 100644
index 0000000..2d66ffd
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-sqlalchemy_1.0.8.bb
@@ -0,0 +1,16 @@
+DESCRIPTION = "Python SQL toolkit and Object Relational Mapper that gives \
+application developers the full power and flexibility of SQL"
+HOMEPAGE = "http://www.sqlalchemy.org/"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=cf755cb27ad4331d45dbb4db5172fd33"
+RDEPENDS_${PN} += "python-numbers"
+
+SRCNAME = "SQLAlchemy"
+SRC_URI = "https://pypi.python.org/packages/source/S/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "7cfd005be63945c96a78c67764ac3a85"
+SRC_URI[sha256sum] = "950c79c0abf9e9f99c43c627c51d40d14a946810a90c35e7cd827bfd0bffe46f"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.4.1.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.4.1.bb
new file mode 100644
index 0000000..18b3806
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-webob_1.4.1.bb
@@ -0,0 +1,23 @@
+DESCRIPTION = "WSGI request and response object"
+HOMEPAGE = "http://webob.org/"
+SECTION = "devel/python"
+LICENSE = "MIT"
+LIC_FILES_CHKSUM = "file://docs/license.txt;md5=8ed3584bcc78c16da363747ccabc5af5"
+
+PR = "r0"
+SRCNAME = "WebOb"
+
+SRC_URI = "http://pypi.python.org/packages/source/W/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "a5c6e8ba5431756e6a5d5ec56047ec94"
+SRC_URI[sha256sum] = "12f8b98390befc47336d2c0e5bad9cc48609d808eabb3f8675dc1027a3a9e9db"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+RDEPENDS_${PN} += " \
+ python-sphinx \
+ python-nose \
+ "
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python-werkzeug_0.10.4.bb b/import-layers/meta-virtualization/recipes-devtools/python/python-werkzeug_0.10.4.bb
new file mode 100644
index 0000000..763e5b9
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python-werkzeug_0.10.4.bb
@@ -0,0 +1,31 @@
+DESCRIPTION = "The Swiss Army knife of Python web development"
+HOMEPAGE = "https://pypi.python.org/pypi/Werkzeug/"
+SECTION = "devel/python"
+LICENSE = "BSD-3-Clause"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=a68f5361a2b2ca9fdf26b38aaecb6faa"
+
+PR = "r0"
+SRCNAME = "Werkzeug"
+
+SRC_URI = "https://pypi.python.org/packages/source/W/${SRCNAME}/${SRCNAME}-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "66a488e0ac50a9ec326fe020b3083450"
+SRC_URI[sha256sum] = "9d2771e4c89be127bc4bac056ab7ceaf0e0064c723d6b6e195739c3af4fd5c1d"
+
+S = "${WORKDIR}/${SRCNAME}-${PV}"
+
+inherit setuptools
+
+RDEPENDS_${PN} += "python-io \
+ python-datetime \
+ python-email \
+ python-zlib \
+ python-pkgutil \
+ python-html \
+ python-shell \
+ python-pprint \
+ python-subprocess \
+ python-netserver"
+
+CLEANBROKEN = "1"
+
diff --git a/import-layers/meta-virtualization/recipes-devtools/python/python_2.%.bbappend b/import-layers/meta-virtualization/recipes-devtools/python/python_2.%.bbappend
new file mode 100644
index 0000000..55301c7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-devtools/python/python_2.%.bbappend
@@ -0,0 +1,8 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+DEPENDS += " ncurses"
+
+do_compile_prepend() {
+ export LIBRARY_PATH=${STAGING_DIR_TARGET}/lib
+}
+
diff --git a/import-layers/meta-virtualization/recipes-extended/dev86/dev86_0.16.20.bb b/import-layers/meta-virtualization/recipes-extended/dev86/dev86_0.16.20.bb
new file mode 100644
index 0000000..38dbf8d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/dev86/dev86_0.16.20.bb
@@ -0,0 +1,38 @@
+DESCRIPTION = "This is a cross development C compiler, assembler and linker environment for the production of 8086 executables (Optionally MSDOS COM)"
+HOMEPAGE = "http://www.debath.co.uk/dev86/"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=8ca43cbc842c2336e835926c2166c28b"
+SECTION = "console/tools"
+PR="r0"
+
+SRC_URI="http://v3.sk/~lkundrak/dev86/archive/Dev86src-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "567cf460d132f9d8775dd95f9208e49a"
+SRC_URI[sha256sum] = "61817a378c8c8ba65f36c6792d457a305dc4eedae8cdc8b6233bf2bb28e5fe8d"
+
+S = "${WORKDIR}/dev86-${PV}"
+
+BBCLASSEXTEND = "native"
+EXTRA_OEMAKE = "VERSION=${PV} PREFIX=${prefix} DIST=${D}"
+
+do_compile() {
+
+ oe_runmake make.fil
+ oe_runmake -f make.fil bcc86 as86 ld86
+
+}
+
+do_install() {
+ # Default prefix to /usr when unset. The original test '[ "${prefix}"=="" ]'
+ if [ -z "${prefix}" ] ; then
+ export prefix=/usr
+ fi
+
+ oe_runmake install-bcc
+ ln -s ../lib/bcc/bcc-cpp ${D}${prefix}/bin/bcc-cpp
+ ln -s ../lib/bcc/bcc-cc1 ${D}${prefix}/bin/bcc-cc1
+
+}
+COMPATIBLE_HOST = "(i.86|x86_64).*-linux"
+FILES_${PN} += "${libdir}/bcc"
+INSANE_SKIP_${PN} = "already-stripped"
diff --git a/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb b/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb
new file mode 100644
index 0000000..5ce0930
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/iasl/iasl_20120215.bb
@@ -0,0 +1,29 @@
+DESCRIPTION = "This is a cross development C compiler, assembler and linker environment for the production of 8086 executables (Optionally MSDOS COM)"
+HOMEPAGE = "http://www.acpica.org/"
+LICENSE = "Intel-ACPI"
+LIC_FILES_CHKSUM = "file://asldefine.h;endline=115;md5=d4d7cf809b8b5e03131327b3f718e8f0"
+SECTION = "console/tools"
+PR="r1"
+
+DEPENDS="flex bison"
+
+SRC_URI="https://acpica.org/sites/acpica/files/acpica-unix-${PV}.tar.gz"
+
+SRC_URI[md5sum] = "324c89e5bb9002e2711e0494290ceacc"
+SRC_URI[sha256sum] = "b2b497415f29ddbefe7be8b9429b62c1f1f6e1ec11456928e4e7da86578e5b8d"
+
+S="${WORKDIR}/acpica-unix-${PV}/source/compiler"
+
+NATIVE_INSTALL_WORKS = "1"
+BBCLASSEXTEND = "native"
+
+do_compile() {
+ CFLAGS="-Wno-error=redundant-decls" $MAKE
+}
+
+do_install() {
+ mkdir -p ${D}${prefix}/bin
+ cp ${S}/iasl ${D}${prefix}/bin
+}
+
+
diff --git a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-compute.bb b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-compute.bb
new file mode 100644
index 0000000..197624f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-compute.bb
@@ -0,0 +1,14 @@
+IMAGE_FEATURES += "ssh-server-openssh"
+EXTRA_IMAGE_FEATURES = "tools-debug debug-tweaks"
+
+IMAGE_INSTALL = "\
+ ${CORE_IMAGE_BASE_INSTALL} \
+ packagegroup-core-basic \
+ openvswitch \
+ libvirt \
+ openflow \
+ "
+
+inherit core-image
+
+IMAGE_FSTYPES = "tar.gz"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb
new file mode 100644
index 0000000..0b2a67b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-controller.bb
@@ -0,0 +1,34 @@
+IMAGE_FEATURES += "ssh-server-openssh package-management"
+EXTRA_IMAGE_FEATURES = "tools-debug debug-tweaks"
+
+IMAGE_INSTALL = "\
+ ${CORE_IMAGE_BASE_INSTALL} \
+ ${ROOTFS_PKGMANAGE_BOOTSTRAP} \
+ packagegroup-core-basic \
+ openvswitch \
+ openvswitch-controller \
+ openvswitch-switch \
+ openvswitch-brcompat \
+ criu \
+ libvirt \
+ libvirt-libvirtd \
+ libvirt-python \
+ libvirt-virsh \
+ openflow \
+ qemu \
+ kernel-modules \
+ dhcp-client \
+ perl-modules \
+ grub \
+ mysql5 \
+ python-twisted \
+ python-lxml \
+ "
+
+inherit core-image
+inherit image-vm
+
+IMAGE_FSTYPES = "vmdk tar.gz"
+
+# Ensure extra space for guest images
+#IMAGE_ROOTFS_EXTRA_SPACE = "41943040"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb
new file mode 100644
index 0000000..e24bf0d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/cloud-image-guest.bb
@@ -0,0 +1,18 @@
+IMAGE_FEATURES += "ssh-server-openssh package-management"
+EXTRA_IMAGE_FEATURES = "tools-debug debug-tweaks"
+
+IMAGE_INSTALL = "\
+ ${CORE_IMAGE_BASE_INSTALL} \
+ ${ROOTFS_PKGMANAGE_BOOTSTRAP} \
+ packagegroup-core-basic \
+ openflow \
+ qemu \
+ kernel-modules \
+ tcpdump \
+ dhcp-client \
+ "
+
+inherit core-image
+inherit image-vm
+
+IMAGE_FSTYPES += "vmdk"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/kvm-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/kvm-image-minimal.bb
new file mode 100644
index 0000000..c96edca
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/kvm-image-minimal.bb
@@ -0,0 +1,23 @@
+DESCRIPTION = "A minimal kvm image"
+
+IMAGE_INSTALL = " \
+ packagegroup-core-boot \
+ ${ROOTFS_PKGMANAGE_BOOTSTRAP} \
+ qemu \
+ libvirt \
+ libvirt-libvirtd \
+ libvirt-virsh \
+ kernel-module-kvm \
+ kernel-module-kvm-intel \
+ kernel-module-kvm-amd \
+ "
+
+IMAGE_FEATURES += "ssh-server-openssh"
+
+IMAGE_LINGUAS = " "
+
+LICENSE = "MIT"
+
+inherit core-image
+
+IMAGE_ROOTFS_SIZE = "8192"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc b/import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc
new file mode 100644
index 0000000..093aa68
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-bootimg.inc
@@ -0,0 +1,35 @@
+SYSLINUX_TIMEOUT = "10"
+SYSLINUX_LABEL = "boot"
+SYSLINUX_XEN_APPEND = "dom0_mem=1048576"
+SYSLINUX_KERNEL_APPEND = "ramdisk_size=32768 root=/dev/ram0 rw console=tty0 console=ttyS0,115200n8"
+#LABELS_append = " ${SYSLINUX_LABEL} "
+
+INITRD = "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.cpio.lzma"
+
+do_bootimg[depends] += "${IMAGE_BASENAME}:do_rootfs"
+
+inherit bootimg
+
+syslinux_populate_append() {
+ install -m 0444 ${STAGING_LIBDIR}/syslinux/mboot.c32 ${HDDDIR}${SYSLINUXDIR}/mboot.c32
+}
+
+grubefi_populate_append() {
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}${EFIDIR}/xen.gz
+}
+
+populate_append() {
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}/xen.gz
+}
+
+build_syslinux_cfg() {
+ echo SERIAL 0 115200 > ${SYSLINUXCFG} # SERIAL must be the first directive; '>' starts the file
+ echo ALLOWOPTIONS 1 >> ${SYSLINUXCFG} # was '>' before SERIAL, which then clobbered this line
+ echo DEFAULT ${SYSLINUX_LABEL} >> ${SYSLINUXCFG}
+ echo TIMEOUT ${SYSLINUX_TIMEOUT} >> ${SYSLINUXCFG}
+ echo PROMPT 1 >> ${SYSLINUXCFG}
+ echo LABEL ${SYSLINUX_LABEL} >> ${SYSLINUXCFG}
+ echo KERNEL mboot.c32 >> ${SYSLINUXCFG}
+ echo APPEND xen.gz ${SYSLINUX_XEN_APPEND} --- vmlinuz ${SYSLINUX_KERNEL_APPEND} --- initrd >> ${SYSLINUXCFG}
+}
+
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
new file mode 100644
index 0000000..f13940c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-guest-image-minimal.bb
@@ -0,0 +1,16 @@
+DESCRIPTION = "A Xen guest image."
+
+inherit core-image
+
+IMAGE_INSTALL += " \
+ packagegroup-core-boot \
+ ${@bb.utils.contains('MACHINE_FEATURES', 'acpi', 'kernel-module-xen-acpi-processor', '', d)} \
+ "
+
+IMAGE_INSTALL += "${@base_contains('DISTRO_FEATURES', 'x11', ' xf86-video-fbdev', '', d)}"
+IMAGE_INSTALL += "${@base_contains('DISTRO_FEATURES', 'x11', ' xf86-video-vesa', '', d)}"
+
+LICENSE = "MIT"
+
+# Send console messages to xen console
+APPEND += "console=hvc0"
diff --git a/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb b/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
new file mode 100644
index 0000000..26b6d06
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/images/xen-image-minimal.bb
@@ -0,0 +1,53 @@
+DESCRIPTION = "A minimal xen image"
+
+INITRD_IMAGE = "core-image-minimal-initramfs"
+
+IMAGE_INSTALL += " \
+ packagegroup-core-boot \
+ packagegroup-core-ssh-openssh \
+ ${@bb.utils.contains('MACHINE_FEATURES', 'acpi', 'kernel-module-xen-acpi-processor', '', d)} \
+ kernel-module-xen-blkback \
+ kernel-module-xen-gntalloc \
+ kernel-module-xen-gntdev \
+ kernel-module-xen-netback \
+ ${@bb.utils.contains('MACHINE_FEATURES', 'pci', 'kernel-module-xen-pciback', '', d)} \
+ kernel-module-xen-wdt \
+ xen-base \
+ qemu \
+ "
+
+LICENSE = "MIT"
+
+inherit core-image
+
+syslinux_iso_populate_append() {
+ install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${ISODIR}${ISOLINUXDIR}
+ install -m 0444 ${STAGING_DATADIR}/syslinux/mboot.c32 ${ISODIR}${ISOLINUXDIR}
+}
+
+syslinux_hddimg_populate_append() {
+ install -m 0444 ${STAGING_DATADIR}/syslinux/libcom32.c32 ${HDDDIR}${SYSLINUXDIR}
+ install -m 0444 ${STAGING_DATADIR}/syslinux/mboot.c32 ${HDDDIR}${SYSLINUXDIR}
+}
+
+grubefi_populate_append() {
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}${EFIDIR}/xen.gz
+}
+
+populate_append() {
+ install -m 0644 ${DEPLOY_DIR_IMAGE}/xen-${MACHINE}.gz ${DEST}/xen.gz
+}
+
+SYSLINUX_XEN_ARGS ?= "loglvl=all guest_loglvl=all console=com1,vga com1=115200,8n1"
+SYSLINUX_KERNEL_ARGS ?= "ramdisk_size=32768 root=/dev/ram0 rw console=hvc0 earlyprintk=xen console=tty0 panic=10 LABEL=boot debugshell=5"
+
+build_syslinux_cfg () {
+ echo "ALLOWOPTIONS 1" > ${SYSLINUXCFG}
+ echo "DEFAULT boot" >> ${SYSLINUXCFG}
+ echo "TIMEOUT 10" >> ${SYSLINUXCFG}
+ echo "PROMPT 1" >> ${SYSLINUXCFG}
+ echo "LABEL boot" >> ${SYSLINUXCFG}
+ echo " KERNEL mboot.c32" >> ${SYSLINUXCFG}
+ echo " APPEND /xen.gz ${SYSLINUX_XEN_ARGS} --- /vmlinuz ${SYSLINUX_KERNEL_ARGS} --- /initrd" >> ${SYSLINUXCFG}
+}
+
diff --git a/import-layers/meta-virtualization/recipes-extended/iptables/iptables_1.%.bbappend b/import-layers/meta-virtualization/recipes-extended/iptables/iptables_1.%.bbappend
new file mode 100644
index 0000000..52a4c4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/iptables/iptables_1.%.bbappend
@@ -0,0 +1,8 @@
+RRECOMMENDS_${PN} += "kernel-module-ip6-tables \
+ kernel-module-ip6table-filter \
+ kernel-module-xt-tcpudp \
+ kernel-module-ipt-reject \
+ kernel-module-iptable-mangle \
+ kernel-module-xt-checksum \
+ kernel-module-xt-state \
+ "
diff --git a/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb b/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb
new file mode 100644
index 0000000..d52c567
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/ipxe/ipxe_git.bb
@@ -0,0 +1,31 @@
+DESCRIPTION = "Open source network boot firmware"
+HOMEPAGE = "http://ipxe.org"
+LICENSE = "GPLv2"
+DEPENDS = "binutils-native perl-native syslinux mtools-native cdrtools-native"
+LIC_FILES_CHKSUM = "file://../COPYING;md5=8ca43cbc842c2336e835926c2166c28b"
+
+SRCREV = "8c43891db4eb131d019360ccfb619f235b17eb58"
+PV = "gitr${SRCPV}"
+PR = "r0"
+
+SRC_URI = "git://git.ipxe.org/ipxe.git;protocol=git"
+
+FILES_${PN} = "/usr/share/firmware/*.rom"
+
+EXTRA_OEMAKE = "NO_WERROR=1"
+#PARALLEL_MAKE=""
+
+S = "${WORKDIR}/git/src"
+
+do_configure() {
+ sed -i s#^ISOLINUX_BIN[\ \\t]*=.*#ISOLINUX_BIN\ =\ ${STAGING_DIR_TARGET}/usr/lib/syslinux/isolinux.bin# arch/i386/Makefile
+}
+
+do_compile() {
+ oe_runmake
+}
+
+do_install() {
+ install -d ${D}/usr/share/firmware
+ install ${S}/bin/*.rom ${D}/usr/share/firmware/
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance.inc b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance.inc
new file mode 100644
index 0000000..53275d1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance.inc
@@ -0,0 +1,47 @@
+#
+# Copyright (C) 2013 Wind River Systems, Inc.
+#
+SUMMARY = "IRQ allocation daemon"
+DESCRIPTION = "A daemon to balance interrupts across multiple CPUs, \
+which can lead to better performance and IO balance on SMP systems."
+
+HOMEPAGE = "http://code.google.com/p/irqbalance/"
+BUGTRACKER = "http://code.google.com/p/irqbalance/issues/list"
+
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \
+ file://irqbalance.c;beginline=6;endline=8;md5=b94e153694672307b503b1bc87dc9e24 \
+ "
+
+DEPENDS = "glib-2.0"
+
+INITSCRIPT_NAME = "irqbalanced"
+INITSCRIPT_PARAMS = "defaults"
+
+inherit autotools update-rc.d pkgconfig systemd
+
+SYSTEMD_PACKAGES = "irqbalance"
+SYSTEMD_SERVICE_irqbalance = "irqbalanced.service"
+
+EXTRA_OECONF = "--program-transform-name= \
+ "
+
+# let the recipes or appends define options
+#
+PACKAGECONFIG ?= ""
+
+# enable,disable,depends,rdepends
+#
+PACKAGECONFIG[numa] = "--enable-numa,--disable-numa,numactl,"
+PACKAGECONFIG[libcap-ng] = "--with-libcap-ng,--without-libcap-ng,libcap-ng,"
+
+do_install () {
+ oe_runmake 'DESTDIR=${D}' install
+ install -d ${D}${sysconfdir}/init.d
+ cat ${S}/irqbalance.init | sed -e's,/usr/sbin,${sbindir},g' > ${D}${sysconfdir}/init.d/irqbalanced
+ chmod 755 ${D}${sysconfdir}/init.d/irqbalanced
+
+ install -d ${D}${systemd_unitdir}/system
+ install -m 0644 ${WORKDIR}/irqbalanced.service ${D}${systemd_unitdir}/system
+ sed -i -e 's,@SBINDIR@,${sbindir},g' ${D}${systemd_unitdir}/system/irqbalanced.service
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/add-initscript.patch b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/add-initscript.patch
new file mode 100644
index 0000000..46a75a8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/add-initscript.patch
@@ -0,0 +1,47 @@
+irqbalance: add basic init script
+
+The upstream irqbalance release package does not contain an
+init script so we create a basic one here.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+Index: irqbalance-0.56/irqbalance.init
+===================================================================
+--- /dev/null
++++ irqbalance-0.56/irqbalance.init
+@@ -0,0 +1,35 @@
++#!/bin/sh
++#
++# Copyright (c) 2012 Wind River Systems, Inc.
++#
++### BEGIN INIT INFO
++# Provides: irqbalance
++# Required-Start:
++# Required-Stop:
++# Default-Start: 2 3 4 5
++# Default-Stop: 0 1 6
++# Short-Description: IRQ allocation daemon
++### END INIT INFO
++
++case "$1" in
++ start)
++ echo -n "Starting irqbalance: "
++ start-stop-daemon -S -b -n irqbalance -a /usr/sbin/irqbalance
++ echo "done"
++ ;;
++ stop)
++ echo -n "Shutting down irqbalance: "
++ start-stop-daemon -K -n irqbalance
++ echo "done"
++ ;;
++ restart)
++ $0 stop
++ $0 start
++ ;;
++ *)
++ echo "Usage: $0 {start | stop | restart}" >&2
++ exit 1
++ ;;
++esac
++
++exit 0
diff --git a/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/fix-configure-libcap-ng.patch b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/fix-configure-libcap-ng.patch
new file mode 100644
index 0000000..5be5243
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/fix-configure-libcap-ng.patch
@@ -0,0 +1,24 @@
+Subject: [PATCH] grasp withval for libcap-ng
+
+Upstream-Status: Pending
+
+Signed-off-by: Ming Liu <ming.liu@windriver.com>
+---
+ configure.ac | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff -urpN a/configure.ac b/configure.ac
+--- a/configure.ac
++++ b/configure.ac
+@@ -61,7 +61,10 @@ AS_IF(
+ AM_CONDITIONAL([LOCAL_GLIB], [test "x$local_glib2" = "xyes"])
+
+ AC_ARG_WITH([libcap-ng],
+- AS_HELP_STRING([libcap-ng], [Add libcap-ng-support @<:@default=auto@:>@]))
++ [AS_HELP_STRING([libcap-ng], [Add libcap-ng-support @<:@default=auto@:>@])],
++ [libcap_ng=$withval],
++ [libcap_ng=$withval]
++)
+
+ AS_IF(
+ [test "x$libcap_ng" != "xno"],
diff --git a/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/irqbalance-Add-status-and-reload-commands.patch b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/irqbalance-Add-status-and-reload-commands.patch
new file mode 100644
index 0000000..d853d0b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/irqbalance-Add-status-and-reload-commands.patch
@@ -0,0 +1,55 @@
+From dc7366896dd5a5bae82be4b11530bc9fdb7cbcdd Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@windriver.com>
+Date: Mon, 26 Aug 2013 10:58:02 -0700
+Subject: [PATCH] irqbalance: Add status and reload commands
+
+Add status and reload commands for irqbalanced init script
+
+Signed-off-by: Yang Shi <yang.shi@windriver.com>
+---
+ irqbalance.init | 12 +++++++++---
+ 1 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/irqbalance.init b/irqbalance.init
+index d12d62c..f58bf55 100644
+--- a/irqbalance.init
++++ b/irqbalance.init
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ #
+-# Copyright (c) 2012 Wind River Systems, Inc.
++# Copyright (c) 2013 Wind River Systems, Inc.
+ #
+ ### BEGIN INIT INFO
+ # Provides: irqbalance
+@@ -11,6 +11,8 @@
+ # Short-Description: IRQ allocation daemon
+ ### END INIT INFO
+
++. /etc/init.d/functions
++
+ case "$1" in
+ start)
+ echo -n "Starting irqbalance: "
+@@ -22,12 +24,16 @@ case "$1" in
+ start-stop-daemon -K -n irqbalance
+ echo "done"
+ ;;
+- restart)
++ restart|reload)
+ $0 stop
+ $0 start
+ ;;
++ status)
++ status irqbalance
++ echo "done"
++ ;;
+ *)
+- echo "Usage: $0 {start | stop | restart}" >&2
++ echo "Usage: $0 {start | stop | restart | reload | status}" >&2
+ exit 1
+ ;;
+ esac
+--
+1.7.5.4
+
diff --git a/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/irqbalanced.service b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/irqbalanced.service
new file mode 100644
index 0000000..5b284fa
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance/irqbalanced.service
@@ -0,0 +1,9 @@
+[Unit]
+Description=irqbalance daemon
+After=syslog.target
+
+[Service]
+ExecStart=@SBINDIR@/irqbalance --foreground
+
+[Install]
+WantedBy=multi-user.target
diff --git a/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance_1.1.0.bb b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance_1.1.0.bb
new file mode 100644
index 0000000..16efa95
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/irqbalance/irqbalance_1.1.0.bb
@@ -0,0 +1,15 @@
+#
+# Copyright (C) 2015 Wind River Systems, Inc.
+#
+
+require irqbalance.inc
+
+SRC_URI[md5sum] = "b15d975336080bcac4be0c1752d43cf3"
+SRC_URI[sha256sum] = "91506e638b03bf27cf5da7dc250d58a753ce8a0288a20265fc7ff0266040706b"
+
+SRC_URI = "https://github.com/Irqbalance/irqbalance/archive/v${PV}.tar.gz;downloadfilename=irqbalance-${PV}.tar.gz \
+ file://add-initscript.patch \
+ file://irqbalance-Add-status-and-reload-commands.patch \
+ file://fix-configure-libcap-ng.patch \
+ file://irqbalanced.service \
+ "
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.2/0001-to-fix-build-error.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.2/0001-to-fix-build-error.patch
new file mode 100644
index 0000000..089ee33
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-1.3.2/0001-to-fix-build-error.patch
@@ -0,0 +1,46 @@
+From 8353dc1e642011199c3b3ba057d51d8768e4cd54 Mon Sep 17 00:00:00 2001
+From: Lei Maohui <leimaohui@cn.fujitsu.com>
+Date: Fri, 31 Jul 2015 03:17:07 +0900
+Subject: [PATCH] to fix build error
+
+The error likes as following
+
+| Generating internals/command.html.tmp
+| /bin/sh: line 3: internals/command.html.tmp: No such file or directory
+| rm: Generating internals/locking.html.tmp
+| cannot remove `internals/command.html.tmp': No such file or directory
+| make[3]: *** [internals/command.html.tmp] Error 1
+| make[3]: *** Waiting for unfinished jobs....
+
+Signed-off-by: Lei Maohui <leimaohui@cn.fujitsu.com>
+[ywei: rebased to libvirt-1.3.2]
+Signed-off-by: Yunguo Wei <yunguo.wei@windriver.com>
+---
+ docs/Makefile.am | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/docs/Makefile.am b/docs/Makefile.am
+index 1b4353b..c199551 100644
+--- a/docs/Makefile.am
++++ b/docs/Makefile.am
+@@ -164,7 +164,7 @@ EXTRA_DIST= \
+ todo.pl hvsupport.pl todo.cfg-example \
+ $(schema_DATA)
+
+-acl_generated = aclperms.htmlinc
++acl.html:: $(srcdir)/aclperms.htmlinc
+
+ $(srcdir)/aclperms.htmlinc: $(top_srcdir)/src/access/viraccessperm.h \
+ $(srcdir)/genaclperms.pl Makefile.am
+@@ -221,7 +221,7 @@ $(srcdir)/hvsupport.html.in: $(srcdir)/hvsupport.pl $(api_DATA) \
+ convert -rotate 90 $< $@
+
+ %.html.tmp: %.html.in site.xsl subsite.xsl page.xsl \
+- sitemap.html.in $(acl_generated)
++ sitemap.html.in
+ @if [ -x $(XSLTPROC) ] ; then \
+ echo "Generating $@"; \
+ name=`echo $@ | sed -e 's/.tmp//'`; \
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc
new file mode 100644
index 0000000..c1dafe9
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt-python.inc
@@ -0,0 +1,55 @@
+inherit pythonnative python-dir
+
+export STAGING_INCDIR
+export STAGING_LIBDIR
+export BUILD_SYS
+export HOST_SYS
+
+RDEPENDS_${PN}-python += "python"
+PACKAGECONFIG_${PN}-python[xen] = ",,,xen-python"
+
+PACKAGES += "${PN}-python-staticdev ${PN}-python-dev ${PN}-python-dbg ${PN}-python"
+
+FILES_${PN}-python-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a"
+FILES_${PN}-python-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la"
+FILES_${PN}-python-dbg += "${PYTHON_SITEPACKAGES_DIR}/.debug/"
+FILES_${PN}-python = "${bindir}/* ${libdir}/* ${libdir}/${PYTHON_DIR}/*"
+
+SRC_URI += "http://libvirt.org/sources/python/libvirt-python-${PV}.tar.gz;name=libvirt_python"
+SRC_URI += "file://libvirt_api_xml_path.patch;patchdir=../libvirt-python-${PV}"
+
+SRC_URI[libvirt_python.md5sum] = "ed018c714d7ddbe93221c796dff283ed"
+SRC_URI[libvirt_python.sha256sum] = "6d35ae9e7801573393b9c92471f39e6700d479f10b641df81d041b469f160bf8"
+
+export LIBVIRT_API_PATH = "${S}/docs/libvirt-api.xml"
+export LIBVIRT_CFLAGS = "-I${S}/include"
+export LIBVIRT_LIBS = "-L${B}/src/.libs -lvirt -ldl"
+export LDFLAGS="-L${B}/src/.libs"
+
+LIBVIRT_INSTALL_ARGS = "--root=${D} \
+ --prefix=${prefix} \
+ --install-lib=${PYTHON_SITEPACKAGES_DIR} \
+ --install-data=${datadir}"
+
+python __anonymous () {
+ pkgconfig = d.getVar('PACKAGECONFIG', True)
+ if ('python') in pkgconfig.split():
+ d.setVar('LIBVIRT_PYTHON_ENABLE', '1')
+ else:
+ d.setVar('LIBVIRT_PYTHON_ENABLE', '0')
+}
+
+do_compile_append() {
+ if [ "${LIBVIRT_PYTHON_ENABLE}" = "1" ]; then
+ cd ${WORKDIR}/libvirt-python-${PV} && \
+ ${STAGING_BINDIR_NATIVE}/python-native/python setup.py build
+ fi
+}
+
+do_install_append() {
+ if [ "${LIBVIRT_PYTHON_ENABLE}" = "1" ]; then
+ cd ${WORKDIR}/${PN}-python-${PV} && \
+ ${STAGING_BINDIR_NATIVE}/python-native/python setup.py install \
+ --install-lib=${D}/${PYTHON_SITEPACKAGES_DIR} ${LIBVIRT_INSTALL_ARGS}
+ fi
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch
new file mode 100644
index 0000000..16c3a16
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch
@@ -0,0 +1,64 @@
+From dd915e7f70e676aea93f750c75d16ce646e71e4b Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Wed, 9 Sep 2015 11:52:44 -0400
+Subject: [PATCH] Revert "build: add $(prefix) to SYSTEMD_UNIT_DIR"
+
+This reverts upstream commit 00e9d6071b2450659ce01bc984f64ecb5d7ba62d
+with minor context changes to allow it to apply.
+
+Yocto based builds use /lib/systemd/system for systemd unit files and
+since upstream has chosen to use a mechanism for setting
+SYSTEMD_UNIT_DIR which is not flexible we have to resort to reverting
+this upstream commit to avoid having ${prefix} added to the path.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+[ywei: rebased to libvirt-1.3.2]
+Signed-off-by: Yunguo Wei <yunguo.wei@windriver.com>
+---
+ daemon/Makefile.am | 2 +-
+ src/Makefile.am | 2 +-
+ tools/Makefile.am | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/daemon/Makefile.am b/daemon/Makefile.am
+index 2dbe81b..41ea2db 100644
+--- a/daemon/Makefile.am
++++ b/daemon/Makefile.am
+@@ -445,7 +445,7 @@ endif ! LIBVIRT_INIT_SCRIPT_UPSTART
+
+ if LIBVIRT_INIT_SCRIPT_SYSTEMD
+
+-SYSTEMD_UNIT_DIR = $(prefix)/lib/systemd/system
++SYSTEMD_UNIT_DIR = /lib/systemd/system
+ BUILT_SOURCES += libvirtd.service libvirtd.socket
+
+ install-init-systemd: install-sysconfig libvirtd.service libvirtd.socket
+diff --git a/src/Makefile.am b/src/Makefile.am
+index a316b4d..d271291 100644
+--- a/src/Makefile.am
++++ b/src/Makefile.am
+@@ -2594,7 +2594,7 @@ EXTRA_DIST += \
+ if WITH_LIBVIRTD
+ if LIBVIRT_INIT_SCRIPT_SYSTEMD
+
+-SYSTEMD_UNIT_DIR = $(prefix)/lib/systemd/system
++SYSTEMD_UNIT_DIR = /lib/systemd/system
+
+ BUILT_SOURCES += virtlockd.service virtlockd.socket \
+ virtlogd.service virtlogd.socket
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index b3227a7..0e58f73 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -356,7 +356,7 @@ libvirt-guests.init: libvirt-guests.init.in libvirt-guests.sh
+
+
+ EXTRA_DIST += libvirt-guests.service.in
+-SYSTEMD_UNIT_DIR = $(prefix)/lib/systemd/system
++SYSTEMD_UNIT_DIR = /lib/systemd/system
+
+ if LIBVIRT_INIT_SCRIPT_SYSTEMD
+ install-systemd: libvirt-guests.service install-sysconfig libvirt-guests.sh
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt-1.0.3-fix-thread-safety-in-lxc-callback-handling.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt-1.0.3-fix-thread-safety-in-lxc-callback-handling.patch
new file mode 100644
index 0000000..558d5ef
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt-1.0.3-fix-thread-safety-in-lxc-callback-handling.patch
@@ -0,0 +1,63 @@
+From ad5d9cee87357f9f38f62583119606ef95ba10df Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Fri, 24 May 2013 16:46:00 +0300
+Subject: [PATCH] Fix thread safety in LXC callback handling
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+---
+ src/lxc/lxc_process.c | 18 +++++++++++++++++-
+ 1 file changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/src/lxc/lxc_process.c b/src/lxc/lxc_process.c
+index aaa81a7..0eadc67 100644
+--- a/src/lxc/lxc_process.c
++++ b/src/lxc/lxc_process.c
+@@ -609,8 +609,13 @@ static void virLXCProcessMonitorExitNotify(virLXCMonitorPtr mon ATTRIBUTE_UNUSED
+ virLXCProtocolExitStatus status,
+ virDomainObjPtr vm)
+ {
++ virLXCDriverPtr driver = lxc_driver;
+ virLXCDomainObjPrivatePtr priv = vm->privateData;
+
++ lxcDriverLock(driver);
++ virObjectLock(vm);
++ lxcDriverUnlock(driver);
++
+ switch (status) {
+ case VIR_LXC_PROTOCOL_EXIT_STATUS_SHUTDOWN:
+ priv->stopReason = VIR_DOMAIN_EVENT_STOPPED_SHUTDOWN;
+@@ -628,6 +633,8 @@ static void virLXCProcessMonitorExitNotify(virLXCMonitorPtr mon ATTRIBUTE_UNUSED
+ }
+ VIR_DEBUG("Domain shutoff reason %d (from status %d)",
+ priv->stopReason, status);
++
++ virObjectUnlock(vm);
+ }
+
+ /* XXX a little evil */
+@@ -636,12 +643,21 @@ static void virLXCProcessMonitorInitNotify(virLXCMonitorPtr mon ATTRIBUTE_UNUSED
+ pid_t initpid,
+ virDomainObjPtr vm)
+ {
+- virLXCDomainObjPrivatePtr priv = vm->privateData;
++ virLXCDriverPtr driver = lxc_driver;
++ virLXCDomainObjPrivatePtr priv;
++
++ lxcDriverLock(driver);
++ virObjectLock(vm);
++ lxcDriverUnlock(driver);
++
++ priv = vm->privateData;
+ priv->initpid = initpid;
+ virDomainAuditInit(vm, initpid);
+
+ if (virDomainSaveStatus(lxc_driver->caps, lxc_driver->stateDir, vm) < 0)
+ VIR_WARN("Cannot update XML with PID for LXC %s", vm->def->name);
++
++ virObjectUnlock(vm);
+ }
+
+ static virLXCMonitorCallbacks monitorCallbacks = {
+--
+1.7.11.7
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt-use-pkg-config-to-locate-libcap.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt-use-pkg-config-to-locate-libcap.patch
new file mode 100644
index 0000000..65b046e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt-use-pkg-config-to-locate-libcap.patch
@@ -0,0 +1,45 @@
+From 3e271f6db12ffe34843428ec2f0bca7a8fe3aa65 Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Wed, 8 Apr 2015 13:03:03 -0400
+Subject: [PATCH] libvirt: use pkg-config to locate libcap
+
+libvirt wants to use pcap-config to locate the exisence and location
+of libpcap. oe-core stubs this script and replaces it with pkg-config,
+which can lead to the host pcap-config triggering and either breaking
+the build or introducing host contamination.
+
+To fix this issue, we patch configure to use 'pkg-config libcap' to
+locate the correct libraries.
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+---
+ configure.ac | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index f37047599b76..5f9b84363b96 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1553,7 +1553,7 @@ fi
+ AM_CONDITIONAL([HAVE_NUMAD], [test "$with_numad" != "no"])
+
+ dnl pcap lib
+-LIBPCAP_CONFIG="pcap-config"
++LIBPCAP_CONFIG="pkg-config libpcap"
+ LIBPCAP_CFLAGS=""
+ LIBPCAP_LIBS=""
+ LIBPCAP_FOUND="no"
+@@ -1563,8 +1563,8 @@ AC_ARG_WITH([libpcap], [AS_HELP_STRING([--with-libpcap=@<:@PFX@:>@],
+ if test "$with_qemu" = "yes"; then
+ case $with_libpcap in
+ no) LIBPCAP_CONFIG= ;;
+- ''|yes) LIBPCAP_CONFIG="pcap-config" ;;
+- *) LIBPCAP_CONFIG="$with_libpcap/bin/pcap-config" ;;
++ ''|yes) LIBPCAP_CONFIG="pkg-config libpcap" ;;
++ *) LIBPCAP_CONFIG="$with_libpcap/bin/pkg-config libpcap" ;;
+ esac
+ AS_IF([test "x$LIBPCAP_CONFIG" != "x"], [
+ AC_MSG_CHECKING(libpcap $LIBPCAP_CONFIG >= $LIBPCAP_REQUIRED )
+--
+2.1.0
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt_api_xml_path.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt_api_xml_path.patch
new file mode 100644
index 0000000..3c2622c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirt_api_xml_path.patch
@@ -0,0 +1,85 @@
+Adding support for LIBVIRT_CFLAGS and LIBVIRT_LIBS
+
+Signed-off-by: Amy Fong <amy.fong@windriver.com>
+
+
+Adding a support for LIBVIRT_API_PATH evironment variable, which can
+control where the script should look for the 'libvirt-api.xml' file.
+This allows building libvirt-python against different libvirt than the
+one installed in the system. This may be used for example in autotest
+or by packagers without the need to install libvirt into the system.
+
+Signed-off-by: Martin Kletzander <mkletzan redhat com>
+[ywei: rebased to 1.3.2]
+Signed-off-by: Yunguo Wei <yunguo.wei@windriver.com>
+---
+ setup.py | 25 ++++++++++++++++++++++---
+ 1 file changed, 22 insertions(+), 3 deletions(-)
+
+Index: libvirt-python-1.2.4/setup.py
+===================================================================
+--- libvirt-python-1.2.4.orig/setup.py
++++ libvirt-python-1.2.4/setup.py
+@@ -40,13 +40,7 @@
+ "libvirt"])
+
+ def have_libvirt_lxc():
+- try:
+- spawn([get_pkgcfg(),
+- "--atleast-version=%s" % MIN_LIBVIRT_LXC,
+- "libvirt"])
+- return True
+- except DistutilsExecError:
+- return False
++ return True
+
+ def get_pkgconfig_data(args, mod, required=True):
+ """Run pkg-config to and return content associated with it"""
+@@ -68,7 +62,17 @@
+ """Check with pkg-config that libvirt is present and extract
+ the API XML file paths we need from it"""
+
+- libvirt_api = get_pkgconfig_data(["--variable", "libvirt_api"], "libvirt")
++ libvirt_api = os.getenv("LIBVIRT_API_PATH")
++
++ if libvirt_api:
++ if not libvirt_api.endswith("-api.xml"):
++ raise ValueError("Invalid path '%s' for API XML" % libvirt_api)
++ if not os.path.exists(libvirt_api):
++ raise ValueError("API XML '%s' does not exist, "
++ "have you built libvirt?" % libvirt_api)
++ else:
++ libvirt_api = get_pkgconfig_data(["--variable", "libvirt_api"],
++ "libvirt")
+
+ offset = libvirt_api.index("-api.xml")
+ libvirt_qemu_api = libvirt_api[0:offset] + "-qemu-api.xml"
+@@ -88,8 +92,17 @@
+
+ c_modules = []
+ py_modules = []
+- ldflags = get_pkgconfig_data(["--libs-only-L"], "libvirt", False).split()
+- cflags = get_pkgconfig_data(["--cflags"], "libvirt", False).split()
++ libvirt_cflags = os.getenv("LIBVIRT_CFLAGS")
++ if libvirt_cflags:
++ cflags = libvirt_cflags.split()
++ else:
++ cflags = get_pkgconfig_data(["--cflags"], "libvirt", False).split()
++
++ libvirt_libs = os.getenv("LIBVIRT_LIBS")
++ if libvirt_libs:
++ ldflags = libvirt_libs.split()
++ else:
++ ldflags = get_pkgconfig_data(["--libs-only-L"], "libvirt", False).split()
+
+ module = Extension('libvirtmod',
+ sources = ['libvirt-override.c', 'build/libvirt.c', 'typewrappers.c', 'libvirt-utils.c'],
+@@ -138,7 +151,7 @@
+ class my_build(build):
+
+ def run(self):
+- check_minimum_libvirt_version()
++# check_minimum_libvirt_version()
+ apis = get_api_xml_files()
+
+ self.spawn([sys.executable, "generator.py", "libvirt", apis[0]])
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirtd.conf b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirtd.conf
new file mode 100644
index 0000000..c0462b4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirtd.conf
@@ -0,0 +1,393 @@
+# Master libvirt daemon configuration file
+#
+# For further information consult http://libvirt.org/format.html
+#
+# NOTE: the tests/daemon-conf regression test script requires
+# that each "PARAMETER = VALUE" line in this file have the parameter
+# name just after a leading "#".
+
+#################################################################
+#
+# Network connectivity controls
+#
+
+# Flag listening for secure TLS connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# It is necessary to setup a CA and issue server certificates before
+# using this capability.
+#
+# This is enabled by default, uncomment this to disable it
+listen_tls = 0
+
+# Listen for unencrypted TCP connections on the public TCP/IP port.
+# NB, must pass the --listen flag to the libvirtd process for this to
+# have any effect.
+#
+# Using the TCP socket requires SASL authentication by default. Only
+# SASL mechanisms which support data encryption are allowed. This is
+# DIGEST_MD5 and GSSAPI (Kerberos5)
+#
+# This is disabled by default, uncomment this to enable it.
+listen_tcp = 1
+
+
+
+# Override the port for accepting secure TLS connections
+# This can be a port number, or service name
+#
+#tls_port = "16514"
+
+# Override the port for accepting insecure TCP connections
+# This can be a port number, or service name
+#
+#tcp_port = "16509"
+
+
+# Override the default configuration which binds to all network
+# interfaces. This can be a numeric IPv4/6 address, or hostname
+#
+#listen_addr = "192.168.0.1"
+
+
+# Flag toggling mDNS advertisement of the libvirt service.
+#
+# Alternatively can disable for all services on a host by
+# stopping the Avahi daemon
+#
+# This is enabled by default, uncomment this to disable it
+#mdns_adv = 0
+
+# Override the default mDNS advertisement name. This must be
+# unique on the immediate broadcast network.
+#
+# The default is "Virtualization Host HOSTNAME", where HOSTNAME
+# is substituted for the short hostname of the machine (without domain)
+#
+#mdns_name = "Virtualization Host Joe Demo"
+
+
+#################################################################
+#
+# UNIX socket access controls
+#
+
+# Set the UNIX domain socket group ownership. This can be used to
+# allow a 'trusted' set of users access to management capabilities
+# without becoming root.
+#
+# This is restricted to 'root' by default.
+#unix_sock_group = "libvirt"
+
+# Set the UNIX socket permissions for the R/O socket. This is used
+# for monitoring VM status only
+#
+# Default allows any user. If setting group ownership may want to
+# restrict this to:
+#unix_sock_ro_perms = "0777"
+
+# Set the UNIX socket permissions for the R/W socket. This is used
+# for full management of VMs
+#
+# Default allows only root. If PolicyKit is enabled on the socket,
+# the default will change to allow everyone (eg, 0777)
+#
+# If not using PolicyKit and setting group ownership for access
+# control then you may want to relax this to:
+#unix_sock_rw_perms = "0770"
+
+# Set the name of the directory in which sockets will be found/created.
+#unix_sock_dir = "/var/run/libvirt"
+
+#################################################################
+#
+# Authentication.
+#
+# - none: do not perform auth checks. If you can connect to the
+# socket you are allowed. This is suitable if there are
+# restrictions on connecting to the socket (eg, UNIX
+# socket permissions), or if there is a lower layer in
+# the network providing auth (eg, TLS/x509 certificates)
+#
+# - sasl: use SASL infrastructure. The actual auth scheme is then
+# controlled from /etc/sasl2/libvirt.conf. For the TCP
+# socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
+# For non-TCP or TLS sockets, any scheme is allowed.
+#
+# - polkit: use PolicyKit to authenticate. This is only suitable
+# for use on the UNIX sockets. The default policy will
+# require a user to supply their own password to gain
+# full read/write access (aka sudo like), while anyone
+# is allowed read/only access.
+#
+# Set an authentication scheme for UNIX read-only sockets
+# By default socket permissions allow anyone to connect
+#
+# To restrict monitoring of domains you may wish to enable
+# an authentication mechanism here
+#auth_unix_ro = "none"
+
+# Set an authentication scheme for UNIX read-write sockets
+# By default socket permissions only allow root. If PolicyKit
+# support was compiled into libvirt, the default will be to
+# use 'polkit' auth.
+#
+# If the unix_sock_rw_perms are changed you may wish to enable
+# an authentication mechanism here
+#auth_unix_rw = "none"
+
+# Change the authentication scheme for TCP sockets.
+#
+# If you don't enable SASL, then all TCP traffic is cleartext.
+# Don't do this outside of a dev/test scenario. For real world
+# use, always enable SASL and use the GSSAPI or DIGEST-MD5
+# mechanism in /etc/sasl2/libvirt.conf
+#auth_tcp = "sasl"
+
+# Change the authentication scheme for TLS sockets.
+#
+# TLS sockets already have encryption provided by the TLS
+# layer, and limited authentication is done by certificates
+#
+# It is possible to make use of any SASL authentication
+# mechanism as well, by using 'sasl' for this option
+#auth_tls = "none"
+
+
+
+#################################################################
+#
+# TLS x509 certificate configuration
+#
+
+
+# Override the default server key file path
+#
+#key_file = "/etc/pki/libvirt/private/serverkey.pem"
+
+# Override the default server certificate file path
+#
+#cert_file = "/etc/pki/libvirt/servercert.pem"
+
+# Override the default CA certificate path
+#
+#ca_file = "/etc/pki/CA/cacert.pem"
+
+# Specify a certificate revocation list.
+#
+# Defaults to not using a CRL, uncomment to enable it
+#crl_file = "/etc/pki/CA/crl.pem"
+
+
+
+#################################################################
+#
+# Authorization controls
+#
+
+
+# Flag to disable verification of our own server certificates
+#
+# When libvirtd starts it performs some sanity checks against
+# its own certificates.
+#
+# Default is to always run sanity checks. Uncommenting this
+# will disable sanity checks which is not a good idea
+#tls_no_sanity_certificate = 1
+
+# Flag to disable verification of client certificates
+#
+# Client certificate verification is the primary authentication mechanism.
+# Any client which does not present a certificate signed by the CA
+# will be rejected.
+#
+# Default is to always verify. Uncommenting this will disable
+# verification - make sure an IP whitelist is set
+#tls_no_verify_certificate = 1
+
+
+# A whitelist of allowed x509 Distinguished Names
+# This list may contain wildcards such as
+#
+# "C=GB,ST=London,L=London,O=Red Hat,CN=*"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no DN's are checked
+#tls_allowed_dn_list = ["DN1", "DN2"]
+
+
+# A whitelist of allowed SASL usernames. The format for usernames
+# depends on the SASL authentication mechanism. Kerberos usernames
+# look like username@REALM
+#
+# This list may contain wildcards such as
+#
+# "*@EXAMPLE.COM"
+#
+# See the POSIX fnmatch function for the format of the wildcards.
+#
+# NB If this is an empty list, no client can connect, so comment out
+# entirely rather than using empty list to disable these checks
+#
+# By default, no Username's are checked
+#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]
+
+
+
+#################################################################
+#
+# Processing controls
+#
+
+# The maximum number of concurrent client connections to allow
+# over all sockets combined.
+#max_clients = 20
+
+
+# The minimum limit sets the number of workers to start up
+# initially. If the number of active clients exceeds this,
+# then more threads are spawned, upto max_workers limit.
+# Typically you'd want max_workers to equal maximum number
+# of clients allowed
+#min_workers = 5
+#max_workers = 20
+
+
+# The number of priority workers. If all workers from above
+# pool will stuck, some calls marked as high priority
+# (notably domainDestroy) can be executed in this pool.
+#prio_workers = 5
+
+# Total global limit on concurrent RPC calls. Should be
+# at least as large as max_workers. Beyond this, RPC requests
+# will be read into memory and queued. This directly impact
+# memory usage, currently each request requires 256 KB of
+# memory. So by default upto 5 MB of memory is used
+#
+# XXX this isn't actually enforced yet, only the per-client
+# limit is used so far
+#max_requests = 20
+
+# Limit on concurrent requests from a single client
+# connection. To avoid one client monopolizing the server
+# this should be a small fraction of the global max_requests
+# and max_workers parameter
+#max_client_requests = 5
+
+#################################################################
+#
+# Logging controls
+#
+
+# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
+# basically 1 will log everything possible
+#log_level = 3
+
+# Logging filters:
+# A filter allows to select a different logging level for a given category
+# of logs
+# The format for a filter is:
+# x:name
+# where name is a match string e.g. remote or qemu
+# the x prefix is the minimal level where matching messages should be logged
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple filter can be defined in a single @filters, they just need to be
+# separated by spaces.
+#
+# e.g:
+# log_filters="3:remote 4:event"
+# to only get warning or errors from the remote layer and only errors from
+# the event layer.
+
+# Logging outputs:
+# An output is one of the places to save logging information
+# The format for an output can be:
+# x:stderr
+# output goes to stderr
+# x:syslog:name
+# use syslog for the output and use the given name as the ident
+# x:file:file_path
+# output to a file, with the given filepath
+# In all case the x prefix is the minimal level, acting as a filter
+# 1: DEBUG
+# 2: INFO
+# 3: WARNING
+# 4: ERROR
+#
+# Multiple output can be defined, they just need to be separated by spaces.
+# e.g.:
+# log_outputs="3:syslog:libvirtd"
+# to log all warnings and errors to syslog under the libvirtd ident
+
+# Log debug buffer size: default 64
+# The daemon keeps an internal debug log buffer which will be dumped in case
+# of crash or upon receiving a SIGUSR2 signal. This setting allows to override
+# the default buffer size in kilobytes.
+# If value is 0 or less the debug log buffer is deactivated
+#log_buffer_size = 64
+
+
+##################################################################
+#
+# Auditing
+#
+# This setting allows usage of the auditing subsystem to be altered:
+#
+# audit_level == 0 -> disable all auditing
+# audit_level == 1 -> enable auditing, only if enabled on host (default)
+# audit_level == 2 -> enable auditing, and exit if disabled on host
+#
+#audit_level = 2
+#
+# If set to 1, then audit messages will also be sent
+# via libvirt logging infrastructure. Defaults to 0
+#
+#audit_logging = 1
+
+###################################################################
+# UUID of the host:
+# Provide the UUID of the host here in case the command
+# 'dmidecode -s system-uuid' does not provide a valid uuid. In case
+# 'dmidecode' does not provide a valid UUID and none is provided here, a
+# temporary UUID will be generated.
+# Keep the format of the example UUID below. UUID must not have all digits
+# be the same.
+
+# NB This default all-zeros UUID will not work. Replace
+# it with the output of the 'uuidgen' command and then
+# uncomment this entry
+#host_uuid = "00000000-0000-0000-0000-000000000000"
+
+###################################################################
+# Keepalive protocol:
+# This allows libvirtd to detect broken client connections or even
+# dead client. A keepalive message is sent to a client after
+# keepalive_interval seconds of inactivity to check if the client is
+# still responding; keepalive_count is a maximum number of keepalive
+# messages that are allowed to be sent to the client without getting
+# any response before the connection is considered broken. In other
+# words, the connection is automatically closed approximately after
+# keepalive_interval * (keepalive_count + 1) seconds since the last
+# message received from the client. If keepalive_interval is set to
+# -1, libvirtd will never send keepalive requests; however clients
+# can still send them and the daemon will send responses. When
+# keepalive_count is set to 0, connections will be automatically
+# closed after keepalive_interval seconds of inactivity without
+# sending any keepalive messages.
+#
+#keepalive_interval = 5
+#keepalive_count = 5
+#
+# If set to 1, libvirtd will refuse to talk to clients that do not
+# support keepalive protocol. Defaults to 0.
+#
+#keepalive_required = 1
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirtd.sh b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirtd.sh
new file mode 100755
index 0000000..29dbf39
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/libvirtd.sh
@@ -0,0 +1,103 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: libvirtd
+# Required-Start: $local_fs $network dbus
+# Required-Stop: $local_fs $network dbus
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+### END INIT INFO
+
+if [ -f /lib/lsb/init-functions ]
+then
+ . /lib/lsb/init-functions
+else
+ # int log_begin_message (char *message)
+ log_begin_msg () {
+ if [ -z "$1" ]; then
+ return 1
+ fi
+ echo " * $@"
+ }
+
+ # int log_end_message (int exitstatus)
+ log_end_msg () {
+
+ # If no arguments were passed, return
+ [ -z "$1" ] && return 1
+
+ # Only do the fancy stuff if we have an appropriate terminal
+ # and if /usr is already mounted
+ TPUT=/usr/bin/tput
+ EXPR=/usr/bin/expr
+ if [ -x $TPUT ] && [ -x $EXPR ] && $TPUT hpa 60 >/dev/null 2>&1; then
+ COLS=`$TPUT cols`
+ if [ -n "$COLS" ]; then
+ COL=`$EXPR $COLS - 7`
+ else
+ COL=73
+ fi
+ UP=`$TPUT cuu1`
+ END=`$TPUT hpa $COL`
+ START=`$TPUT hpa 0`
+ RED=`$TPUT setaf 1`
+ NORMAL=`$TPUT op`
+ if [ $1 -eq 0 ]; then
+ echo "$UP$END[ ok ]"
+ else
+ echo -e "$UP$START $RED*$NORMAL$END[${RED}fail${NORMAL}]"
+ fi
+ else
+ if [ $1 -eq 0 ]; then
+ echo " ...done."
+ else
+ echo " ...fail!"
+ fi
+ fi
+ return $1
+ }
+
+ log_warning_msg () {
+ if log_use_fancy_output; then
+ YELLOW=`$TPUT setaf 3`
+ NORMAL=`$TPUT op`
+ echo "$YELLOW*$NORMAL $@"
+ else
+ echo "$@"
+ fi
+ }
+
+fi
+
+case "$1" in
+ start)
+ if [ -e /var/run/libvirtd.pid ]; then
+ if [ -d /proc/$(cat /var/run/libvirtd.pid) ]; then
+ echo "virtualization library already started; not starting."
+ else
+ echo "Removing stale PID file /var/run/libvirtd.pid."
+ rm -f /var/run/libvirtd.pid
+ fi
+ fi
+ log_begin_msg "Starting virtualization library daemon: libvirtd"
+ if [ ! -e /var/run/libvirtd.pid ]; then
+ start-stop-daemon -K -x /usr/bin/dnsmasq --pidfile /var/run/libvirt/network/default.pid
+ fi
+ start-stop-daemon --start --quiet --pidfile /var/run/libvirtd.pid --exec /usr/sbin/libvirtd -- --daemon --listen
+ log_end_msg $?
+ ;;
+ stop)
+ log_begin_msg "Stopping virtualization library daemon: libvirtd"
+ start-stop-daemon --stop --quiet --retry 3 --exec /usr/sbin/libvirtd --pidfile /var/run/libvirtd.pid
+ log_end_msg $?
+ rm -f /var/run/libvirtd.pid
+ ;;
+ restart)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart}"
+ exit 1
+ ;;
+esac
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/qemu-fix-crash-in-qemuOpen.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/qemu-fix-crash-in-qemuOpen.patch
new file mode 100644
index 0000000..3cf9e83
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/qemu-fix-crash-in-qemuOpen.patch
@@ -0,0 +1,39 @@
+From 74bff2509080912ea8abf1de8fd95fa2412b659a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?J=C3=A1n=20Tomko?= <jtomko@redhat.com>
+Date: Thu, 11 Apr 2013 11:37:25 +0200
+Subject: [PATCH] qemu: fix crash in qemuOpen
+
+commit 74bff2509080912ea8abf1de8fd95fa2412b659a from upstream
+git://libvirt.org/libvirt.git
+
+If the path part of connection URI is not present, cfg is used
+unitialized.
+
+https://bugzilla.redhat.com/show_bug.cgi?id=950855
+---
+ src/qemu/qemu_driver.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
+index 2c0d7d1..0d41e39 100644
+--- a/src/qemu/qemu_driver.c
++++ b/src/qemu/qemu_driver.c
+@@ -1026,6 +1026,7 @@ static virDrvOpenStatus qemuOpen(virConnectPtr conn,
+ goto cleanup;
+ }
+
++ cfg = virQEMUDriverGetConfig(qemu_driver);
+ if (conn->uri->path == NULL) {
+ virReportError(VIR_ERR_INTERNAL_ERROR,
+ _("no QEMU URI path given, try %s"),
+@@ -1033,7 +1034,6 @@ static virDrvOpenStatus qemuOpen(virConnectPtr conn,
+ goto cleanup;
+ }
+
+- cfg = virQEMUDriverGetConfig(qemu_driver);
+ if (cfg->privileged) {
+ if (STRNEQ(conn->uri->path, "/system") &&
+ STRNEQ(conn->uri->path, "/session")) {
+--
+1.7.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/run-ptest b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/run-ptest
new file mode 100644
index 0000000..a434b18
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/run-ptest
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+make -C tests -k check-TESTS
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch
new file mode 100644
index 0000000..a33f569
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/runptest.patch
@@ -0,0 +1,113 @@
+Add 'install-ptest' rule.
+Change TESTS_ENVIRONMENT to allow running outside build dir.
+
+Signed-off-by: Mihaela Sendrea <mihaela.sendrea@enea.com>
+Upstream-Status: Pending
+
+Index: libvirt-1.2.4/tests/Makefile.am
+===================================================================
+--- libvirt-1.2.4.orig/tests/Makefile.am
++++ libvirt-1.2.4/tests/Makefile.am
+@@ -31,9 +31,11 @@
+ -I$(top_srcdir)/src/conf \
+ $(GETTEXT_CPPFLAGS)
+
++PTEST_DIR ?= $(libdir)/libvirt/ptest
++
+ AM_CFLAGS = \
+- -Dabs_builddir="\"$(abs_builddir)\"" \
+- -Dabs_srcdir="\"$(abs_srcdir)\"" \
++ -Dabs_builddir="\"$(PTEST_DIR)/tests\"" \
++ -Dabs_srcdir="\"$(PTEST_DIR)/tests\"" \
+ $(LIBXML_CFLAGS) \
+ $(LIBNL_CFLAGS) \
+ $(GNUTLS_CFLAGS) \
+@@ -48,7 +50,7 @@
+
+ if WITH_DRIVER_MODULES
+ INCLUDES += \
+- -DTEST_DRIVER_DIR=\"$(top_builddir)/src/.libs\"
++ -DTEST_DRIVER_DIR=\"$(PTEST_DIR)/src/.libs\"
+ endif WITH_DRIVER_MODULES
+
+ PROBES_O =
+@@ -409,20 +411,19 @@
+ # Also, BSD sh doesn't like 'a=b b=$$a', so we can't use an
+ # intermediate shell variable, but must do all the expansion in make
+
+-lv_abs_top_builddir=$(shell cd '$(top_builddir)' && pwd)
+ path_add = $(subst :,$(PATH_SEPARATOR),\
+- $(subst !,$(lv_abs_top_builddir)/,!daemon:!tools:!tests))
++ $(subst !,$(PTEST_DIR)/,!daemon:!tools:!tests))
+
+ VIR_TEST_EXPENSIVE ?= $(VIR_TEST_EXPENSIVE_DEFAULT)
+ TESTS_ENVIRONMENT = \
+- abs_top_builddir=$(lv_abs_top_builddir) \
+- abs_top_srcdir=`cd '$(top_srcdir)'; pwd` \
+- abs_builddir=$(abs_builddir) \
+- abs_srcdir=$(abs_srcdir) \
+- CONFIG_HEADER="$(lv_abs_top_builddir)/config.h" \
++ abs_top_builddir="$(PTEST_DIR)" \
++ abs_top_srcdir="$(PTEST_DIR)" \
++ abs_builddir="$(PTEST_DIR)/tests" \
++ abs_srcdir="$(PTEST_DIR)/tests" \
++ CONFIG_HEADER="$(PTEST_DIR)/config.h" \
+ PATH="$(path_add)$(PATH_SEPARATOR)$$PATH" \
+ SHELL="$(SHELL)" \
+- LIBVIRT_DRIVER_DIR="$(lv_abs_top_builddir)/src/.libs" \
++ LIBVIRT_DRIVER_DIR="$(PTEST_DIR)/src/.libs" \
+ LIBVIRT_AUTOSTART=0 \
+ LC_ALL=C \
+ VIR_TEST_EXPENSIVE=$(VIR_TEST_EXPENSIVE) \
+@@ -1137,5 +1138,51 @@
+ EXTRA_DIST += objectlocking.ml
+ endif ! WITH_CIL
+
++buildtest-TESTS: $(TESTS) $(test_libraries) $(test_helpers)
++
++PTESTS = $(TESTS) $(test_helpers) test-lib.sh schematestutils.sh
++
++install-ptest:
++ list='$(TESTS) $(test_helpers) test-lib.sh schematestutils.sh'
++ install -d $(DEST_DIR)/tools
++ @(if [ -d ../tools/.libs ] ; then cd ../tools/.libs; fi; \
++ install * $(DEST_DIR)/tools)
++ install -d $(DEST_DIR)/src/network
++ cp ../src/network/*.xml $(DEST_DIR)/src/network
++ install -d $(DEST_DIR)/src/cpu
++ cp ../src/cpu/*.xml $(DEST_DIR)/src/cpu
++ install ../src/libvirt_iohelper $(DEST_DIR)/src
++ install -D ../daemon/libvirtd $(DEST_DIR)/daemon/libvirtd
++ install -D ../daemon/libvirtd.conf $(DEST_DIR)/daemon/libvirtd.conf
++ @(if [ -d ../daemon/.libs ] ; then cd ../daemon/.libs; fi; \
++ install * $(DEST_DIR)/daemon)
++ install -d $(DEST_DIR)/src/.libs
++ @(if [ -d ../src/.libs ] ; then cd ../src/.libs; fi; \
++ install * $(DEST_DIR)/src/.libs)
++ install -d $(DEST_DIR)/docs/schemas
++ cp ../docs/schemas/*.rng $(DEST_DIR)/docs/schemas
++ cp -r ../build-aux $(DEST_DIR)
++ install -d $(DEST_DIR)/examples/xml
++ cp -r ../examples/xml/test $(DEST_DIR)/examples/xml
++ install -d $(DEST_DIR)/tests/.libs
++ find . -type d -name "*xml2xml*" -exec cp -r {} $(DEST_DIR)/tests \;
++ find . -type d -name "*data" -exec cp -r {} $(DEST_DIR)/tests \;
++ @(for file in $(PTESTS); do \
++ if [ -f .libs/$$file ]; then \
++ install .libs/$$file $(DEST_DIR)/tests; \
++ else \
++ install $$file $(DEST_DIR)/tests; \
++ fi; \
++ done;)
++ @(if [ -d .libs ]; then install .libs/*.so $(DEST_DIR)/tests/.libs; fi;)
++ cp ../config.h $(DEST_DIR)
++ cp Makefile $(DEST_DIR)/tests
++ sed -i -e 's/^Makefile:/_Makefile:/' $(DEST_DIR)/tests/Makefile
++ cp ../Makefile $(DEST_DIR)
++ sed -i -e 's|^Makefile:|_Makefile:|' $(DEST_DIR)/Makefile
++ sed -i -e 's|$(BUILD_DIR)|$(PTEST_DIR)|g' $(DEST_DIR)/tests/Makefile
++ sed -i -e 's|$(BUILD_DIR)|$(PTEST_DIR)|g' $(DEST_DIR)/Makefile
++ sed -i -e 's|^\(.*\.log:\) \(.*EXEEXT.*\)|\1|g' $(DEST_DIR)/tests/Makefile
++
+ CLEANFILES = *.cov *.gcov .libs/*.gcda .libs/*.gcno *.gcno *.gcda *.cmi *.cmx \
+ objectlocking-files.txt
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/tests-allow-separated-src-and-build-dirs.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/tests-allow-separated-src-and-build-dirs.patch
new file mode 100644
index 0000000..3964865
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/tests-allow-separated-src-and-build-dirs.patch
@@ -0,0 +1,57 @@
+From 884b6e3724b75cd92766d5386455983e3557a286 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Fri, 2 May 2014 13:45:05 -0400
+Subject: [PATCH] tests: allow separated src and build dirs
+
+Fixup Makefile.am to search for static files back in srcdir.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+ tests/Makefile.am | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/tests/Makefile.am b/tests/Makefile.am
+index 0da514b..b8d7c19 100644
+--- a/tests/Makefile.am
++++ b/tests/Makefile.am
+@@ -1006,22 +1006,22 @@ install-ptest:
+ @(if [ -d ../tools/.libs ] ; then cd ../tools/.libs; fi; \
+ install * $(DEST_DIR)/tools)
+ install -d $(DEST_DIR)/src/network
+- cp ../src/network/*.xml $(DEST_DIR)/src/network
++ cp $(top_srcdir)/src/network/*.xml $(DEST_DIR)/src/network
+ install -d $(DEST_DIR)/src/cpu
+- cp ../src/cpu/*.xml $(DEST_DIR)/src/cpu
++ cp $(top_srcdir)/src/cpu/*.xml $(DEST_DIR)/src/cpu
+ install ../src/libvirt_iohelper $(DEST_DIR)/src
+ install -D ../daemon/libvirtd $(DEST_DIR)/daemon/libvirtd
+- install -D ../daemon/libvirtd.conf $(DEST_DIR)/daemon/libvirtd.conf
++ install -D $(top_srcdir)/daemon/libvirtd.conf $(DEST_DIR)/daemon/libvirtd.conf
+ @(if [ -d ../daemon/.libs ] ; then cd ../daemon/.libs; fi; \
+ install * $(DEST_DIR)/daemon)
+ install -d $(DEST_DIR)/src/.libs
+ @(if [ -d ../src/.libs ] ; then cd ../src/.libs; fi; \
+ install * $(DEST_DIR)/src/.libs)
+ install -d $(DEST_DIR)/docs/schemas
+- cp ../docs/schemas/*.rng $(DEST_DIR)/docs/schemas
+- cp -r ../build-aux $(DEST_DIR)
++ cp $(top_srcdir)/docs/schemas/*.rng $(DEST_DIR)/docs/schemas
++ cp -r $(top_srcdir)/build-aux $(DEST_DIR)
+ install -d $(DEST_DIR)/examples/xml
+- cp -r ../examples/xml/test $(DEST_DIR)/examples/xml
++ cp -r $(top_srcdir)/examples/xml/test $(DEST_DIR)/examples/xml
+ install -d $(DEST_DIR)/tests/.libs
+ find . -type d -name "*xml2xml*" -exec cp -r {} $(DEST_DIR)/tests \;
+ find . -type d -name "*data" -exec cp -r {} $(DEST_DIR)/tests \;
+@@ -1029,7 +1029,7 @@ install-ptest:
+ if [ -f .libs/$$file ]; then \
+ install .libs/$$file $(DEST_DIR)/tests; \
+ else \
+- install $$file $(DEST_DIR)/tests; \
++ install $(srcdir)/$$file $(DEST_DIR)/tests; \
+ fi; \
+ done;)
+ @(if [ -d .libs ]; then install .libs/*.so $(DEST_DIR)/tests/.libs; fi;)
+--
+1.8.3.2
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/tools-add-libvirt-net-rpc-to-virt-host-validate-when.patch b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/tools-add-libvirt-net-rpc-to-virt-host-validate-when.patch
new file mode 100644
index 0000000..a7fc727
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt/tools-add-libvirt-net-rpc-to-virt-host-validate-when.patch
@@ -0,0 +1,89 @@
+From a790bfe8fa7b24b68cb6f9a1b7205fda2c6d558e Mon Sep 17 00:00:00 2001
+From: Bruce Ashfield <bruce.ashfield@windriver.com>
+Date: Fri, 2 Aug 2013 11:38:43 -0400
+Subject: [PATCH] tools: add libvirt-net-rpc to virt-host-validate when TLS is
+ enabled
+
+When gnu-tls is enabled for libvirt references to virNetTLSInit are
+generated in libvirt. Any binaries linking against libvirt, must also
+link against libvirt-net-rpc which provides the implementation.
+
+Signed-off-by: Bruce Ashfield <bruce.ashfield@windriver.com>
+[ywei: rebased to libvirt-1.3.2]
+Signed-off-by: Yunguo Wei <yunguo.wei@windriver.com>
+---
+ examples/Makefile.am | 19 +++++++++++++++++++
+ tools/Makefile.am | 13 +++++++++++++
+ 2 files changed, 32 insertions(+)
+
+diff --git a/examples/Makefile.am b/examples/Makefile.am
+index 2adcefb..fff49d4 100644
+--- a/examples/Makefile.am
++++ b/examples/Makefile.am
+@@ -39,6 +39,10 @@ INCLUDES = -I$(top_builddir)/include -I$(top_srcdir)/include -I$(top_srcdir) \
+ LDADD = $(STATIC_BINARIES) $(WARN_CFLAGS) $(COVERAGE_LDFLAGS) \
+ $(top_builddir)/src/libvirt.la $(top_builddir)/gnulib/lib/libgnu.la
+
++if WITH_GNUTLS
++LDADD += $(top_builddir)/src/libvirt-net-rpc.la
++endif
++
+ noinst_PROGRAMS=dominfo/info1 dommigrate/dommigrate domsuspend/suspend \
+ domtop/domtop hellolibvirt/hellolibvirt object-events/event-test \
+ openauth/openauth rename/rename
+@@ -52,6 +56,21 @@ object_events_event_test_SOURCES = object-events/event-test.c
+ openauth_openauth_SOURCES = openauth/openauth.c
+ rename_rename_SOURCES = rename/rename.c
+
++if WITH_GNUTLS
++dominfo_info1_LDADD = $(top_builddir)/src/libvirt-net-rpc.la \
++ $(LDADD) \
++ $(NULL)
++domsuspend_suspend_LDADD = $(top_builddir)/src/libvirt-net-rpc.la \
++ $(LDADD) \
++ $(NULL)
++hellolibvirt_hellolibvirt_LDADD = $(top_builddir)/src/libvirt-net-rpc.la \
++ $(LDADD) \
++ $(NULL)
++openauth_openauth_LDADD = $(top_builddir)/src/libvirt-net-rpc.la \
++ $(LDADD) \
++ $(NULL)
++endif
++
+ if WITH_APPARMOR_PROFILES
+ apparmordir = $(sysconfdir)/apparmor.d/
+ apparmor_DATA = \
+diff --git a/tools/Makefile.am b/tools/Makefile.am
+index 0be3567..b3227a7 100644
+--- a/tools/Makefile.am
++++ b/tools/Makefile.am
+@@ -169,6 +169,13 @@ virt_host_validate_LDADD = \
+ ../gnulib/lib/libgnu.la \
+ $(NULL)
+
++if WITH_GNUTLS
++virt_host_validate_LDADD += ../src/libvirt-net-rpc.la \
++ ../gnulib/lib/libgnu.la \
++ $(NULL)
++endif
++
++
+ virt_host_validate_CFLAGS = \
+ $(LIBXML_CFLAGS) \
+ $(WARN_CFLAGS) \
+@@ -256,6 +263,12 @@ virt_admin_CFLAGS = \
+ $(READLINE_CFLAGS)
+ BUILT_SOURCES =
+
++if WITH_GNUTLS
++virsh_LDADD += ../src/libvirt-net-rpc.la \
++ ../gnulib/lib/libgnu.la \
++ $(NULL)
++endif
++
+ if WITH_WIN_ICON
+ virsh_LDADD += virsh_win_icon.$(OBJEXT)
+
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.2.bb b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.2.bb
new file mode 100644
index 0000000..42066c1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/libvirt/libvirt_1.3.2.bb
@@ -0,0 +1,287 @@
+DESCRIPTION = "A toolkit to interact with the virtualization capabilities of recent versions of Linux."
+HOMEPAGE = "http://libvirt.org"
+LICENSE = "LGPLv2.1+"
+LICENSE_${PN}-ptest = "GPLv2+ & LGPLv2.1"
+LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \
+ file://COPYING.LESSER;md5=4b54a1fd55a448865a0b32d41598759d"
+SECTION = "console/tools"
+PR = "r1"
+
+DEPENDS = "bridge-utils gnutls libxml2 lvm2 avahi parted curl libpcap util-linux e2fsprogs pm-utils \
+ iptables dnsmasq readline libtasn1 libxslt-native"
+
+# libvirt-guests.sh needs gettext.sh
+#
+RDEPENDS_${PN} = "gettext-runtime"
+
+RDEPENDS_${PN}-ptest += "make gawk"
+
+RDEPENDS_libvirt-libvirtd += "bridge-utils iptables pm-utils dnsmasq netcat-openbsd"
+RDEPENDS_libvirt-libvirtd_append_x86-64 = " dmidecode"
+RDEPENDS_libvirt-libvirtd_append_x86 = " dmidecode"
+
+# connman blocks port 53, so libvirtd can't start its DNS service
+RCONFLICTS_${PN}_libvirtd = "connman"
+
+SRC_URI = "http://libvirt.org/sources/libvirt-${PV}.tar.gz;name=libvirt \
+ file://tools-add-libvirt-net-rpc-to-virt-host-validate-when.patch \
+ file://libvirtd.sh \
+ file://libvirtd.conf \
+ file://runptest.patch \
+ file://run-ptest \
+ file://tests-allow-separated-src-and-build-dirs.patch \
+ file://libvirt-use-pkg-config-to-locate-libcap.patch \
+ file://0001-to-fix-build-error.patch \
+ file://Revert-build-add-prefix-to-SYSTEMD_UNIT_DIR.patch \
+ "
+
+SRC_URI[libvirt.md5sum] = "b48b06bbc7efbe9973ed0f3f223d6da2"
+SRC_URI[libvirt.sha256sum] = "e3c6fc2683178660b371efb3ac7a1103a3f4b78efac7ffe560bc5917974ccf05"
+
+inherit autotools gettext update-rc.d pkgconfig ptest systemd
+
+CACHED_CONFIGUREVARS += "\
+ac_cv_path_XMLLINT=/usr/bin/xmllint \
+ac_cv_path_XMLCATLOG=/usr/bin/xmlcatalog \
+ac_cv_path_AUGPARSE=/usr/bin/augparse \
+ac_cv_path_DNSMASQ=/usr/bin/dnsmasq \
+ac_cv_path_BRCTL=/usr/sbin/brctl \
+ac_cv_path_TC=/sbin/tc \
+ac_cv_path_UDEVADM=/sbin/udevadm \
+ac_cv_path_MODPROBE=/sbin/modprobe \
+ac_cv_path_IP_PATH=/bin/ip \
+ac_cv_path_IPTABLES_PATH=/usr/sbin/iptables \
+ac_cv_path_IP6TABLES_PATH=/usr/sbin/ip6tables \
+ac_cv_path_MOUNT=/bin/mount \
+ac_cv_path_UMOUNT=/bin/umount \
+ac_cv_path_MKFS=/usr/sbin/mkfs \
+ac_cv_path_SHOWMOUNT=/usr/sbin/showmount \
+ac_cv_path_PVCREATE=/usr/sbin/pvcreate \
+ac_cv_path_VGCREATE=/usr/sbin/vgcreate \
+ac_cv_path_LVCREATE=/usr/sbin/lvcreate \
+ac_cv_path_PVREMOVE=/usr/sbin/pvremove \
+ac_cv_path_VGREMOVE=/usr/sbin/vgremove \
+ac_cv_path_LVREMOVE=/usr/sbin/lvremove \
+ac_cv_path_LVCHANGE=/usr/sbin/lvchange \
+ac_cv_path_VGCHANGE=/usr/sbin/vgchange \
+ac_cv_path_VGSCAN=/usr/sbin/vgscan \
+ac_cv_path_PVS=/usr/sbin/pvs \
+ac_cv_path_VGS=/usr/sbin/vgs \
+ac_cv_path_LVS=/usr/sbin/lvs \
+ac_cv_path_PARTED=/usr/sbin/parted \
+ac_cv_path_DMSETUP=/usr/sbin/dmsetup"
+
+# Ensure that libvirt uses polkit rather than policykit, whether the host has
+# pkcheck installed or not, and ensure the path is correct per our config.
+CACHED_CONFIGUREVARS += "ac_cv_path_PKCHECK_PATH=${bindir}/pkcheck"
+
+# Some other possible paths we are not yet setting
+#ac_cv_path_RPCGEN=
+#ac_cv_path_XSLTPROC=
+#ac_cv_path_RADVD=
+#ac_cv_path_UDEVSETTLE=
+#ac_cv_path_EBTABLES_PATH=
+#ac_cv_path_PKG_CONFIG=
+#ac_cv_path_ac_pt_PKG_CONFIG
+#ac_cv_path_POLKIT_AUTH=
+#ac_cv_path_DTRACE=
+#ac_cv_path_ISCSIADM=
+#ac_cv_path_MSGFMT=
+#ac_cv_path_GMSGFMT=
+#ac_cv_path_XGETTEXT=
+#ac_cv_path_MSGMERGE=
+#ac_cv_path_SCRUB=
+#ac_cv_path_PYTHON=
+
+ALLOW_EMPTY_${PN} = "1"
+
+PACKAGES =+ "${PN}-libvirtd ${PN}-virsh"
+
+ALLOW_EMPTY_${PN}-libvirtd = "1"
+
+FILES_${PN}-libvirtd = " \
+ ${sysconfdir}/init.d \
+ ${sysconfdir}/sysctl.d \
+ ${sysconfdir}/logrotate.d \
+ ${sysconfdir}/libvirt/libvirtd.conf \
+ /usr/lib/sysctl.d/60-libvirtd.conf \
+ ${sbindir}/libvirtd \
+ ${systemd_unitdir}/system/* \
+ ${@base_contains('DISTRO_FEATURES', 'sysvinit', '', '${libexecdir}/libvirt-guests.sh', d)} \
+ "
+
+FILES_${PN}-virsh = "${bindir}/virsh"
+FILES_${PN} += "${libdir}/libvirt/connection-driver \
+ ${datadir}/augeas \
+ ${datadir}/polkit-1"
+
+FILES_${PN}-dbg += "${libdir}/libvirt/connection-driver/.debug ${libdir}/libvirt/lock-driver/.debug"
+FILES_${PN}-staticdev += "${libdir}/*.a ${libdir}/libvirt/connection-driver/*.a ${libdir}/libvirt/lock-driver/*.a"
+
+CONFFILES_${PN} += "${sysconfdir}/libvirt/libvirt.conf \
+ ${sysconfdir}/libvirt/lxc.conf \
+ ${sysconfdir}/libvirt/qemu-lockd.conf \
+ ${sysconfdir}/libvirt/qemu.conf \
+ ${sysconfdir}/libvirt/virt-login-shell.conf \
+ ${sysconfdir}/libvirt/virtlockd.conf"
+
+CONFFILES_${PN}-libvirtd = "${sysconfdir}/logrotate.d/libvirt ${sysconfdir}/logrotate.d/libvirt.lxc \
+ ${sysconfdir}/logrotate.d/libvirt.qemu ${sysconfdir}/logrotate.d/libvirt.uml \
+ ${sysconfdir}/libvirt/libvirtd.conf \
+ /usr/lib/sysctl.d/libvirtd.conf"
+
+INITSCRIPT_PACKAGES = "${PN}-libvirtd"
+INITSCRIPT_NAME_${PN}-libvirtd = "libvirtd"
+INITSCRIPT_PARAMS_${PN}-libvirtd = "defaults 72"
+
+SYSTEMD_PACKAGES = "${PN}-libvirtd"
+SYSTEMD_SERVICE_${PN}-libvirtd = " \
+ libvirtd.socket \
+ libvirtd.service \
+ virtlockd.service \
+ libvirt-guests.service \
+ virtlockd.socket \
+ "
+
+
+PRIVATE_LIBS_${PN}-ptest = " \
+ libvirt-lxc.so.0 \
+ libvirt.so.0 \
+ libvirt-qemu.so.0 \
+ lockd.so \
+ libvirt_driver_secret.so \
+ libvirt_driver_nodedev.so \
+ libvirt_driver_vbox.so \
+ libvirt_driver_interface.so \
+ libvirt_driver_uml.so \
+ libvirt_driver_network.so \
+ libvirt_driver_nwfilter.so \
+ libvirt_driver_qemu.so \
+ libvirt_driver_storage.so \
+ libvirt_driver_lxc.so \
+ "
+
+# xen-minimal config
+#PACKAGECONFIG ??= "xen libxl xen-inotify test remote libvirtd"
+
+# full config
+PACKAGECONFIG ??= "qemu yajl uml openvz vmware vbox esx iproute2 lxc test \
+ remote macvtap libvirtd netcf udev python ebtables \
+ ${@base_contains('DISTRO_FEATURES', 'selinux', 'selinux audit libcap-ng', '', d)} \
+ ${@base_contains('DISTRO_FEATURES', 'xen', 'xen libxl xen-inotify', '', d)} \
+ ${@base_contains('DISTRO_FEATURES', 'x11', 'polkit', '', d)} \
+ "
+
+# enable,disable,depends,rdepends
+#
+PACKAGECONFIG[qemu] = "--with-qemu,--without-qemu,qemu,"
+PACKAGECONFIG[yajl] = "--with-yajl,--without-yajl,yajl,yajl"
+PACKAGECONFIG[xen] = "--with-xen,--without-xen,xen,"
+PACKAGECONFIG[xenapi] = "--with-xenapi,--without-xenapi,,"
+PACKAGECONFIG[libxl] = "--with-libxl=${STAGING_DIR_TARGET}/lib,--without-libxl,libxl,"
+PACKAGECONFIG[xen-inotify] = "--with-xen-inotify,--without-xen-inotify,xen,"
+PACKAGECONFIG[uml] = "--with-uml, --without-uml,,"
+PACKAGECONFIG[openvz] = "--with-openvz,--without-openvz,,"
+PACKAGECONFIG[vmware] = "--with-vmware,--without-vmware,,"
+PACKAGECONFIG[phyp] = "--with-phyp,--without-phyp,,"
+PACKAGECONFIG[vbox] = "--with-vbox,--without-vbox,,"
+PACKAGECONFIG[esx] = "--with-esx,--without-esx,,"
+PACKAGECONFIG[hyperv] = "--with-hyperv,--without-hyperv,,"
+PACKAGECONFIG[polkit] = "--with-polkit,--without-polkit,polkit,polkit"
+PACKAGECONFIG[lxc] = "--with-lxc,--without-lxc, lxc,"
+PACKAGECONFIG[test] = "--with-test=yes,--with-test=no,,"
+PACKAGECONFIG[remote] = "--with-remote,--without-remote,,"
+PACKAGECONFIG[macvtap] = "--with-macvtap=yes,--with-macvtap=no,libnl,libnl"
+PACKAGECONFIG[libvirtd] = "--with-libvirtd,--without-libvirtd,,"
+PACKAGECONFIG[netcf] = "--with-netcf,--without-netcf,netcf,netcf"
+PACKAGECONFIG[dtrace] = "--with-dtrace,--without-dtrace,,"
+PACKAGECONFIG[udev] = "--with-udev --with-pciaccess,--without-udev,udev libpciaccess,"
+PACKAGECONFIG[selinux] = "--with-selinux,--without-selinux,libselinux,"
+PACKAGECONFIG[ebtables] = "ac_cv_path_EBTABLES_PATH=/sbin/ebtables,ac_cv_path_EBTABLES_PATH=,ebtables,ebtables"
+PACKAGECONFIG[python] = ",,python,"
+PACKAGECONFIG[sasl] = "--with-sasl,--without-sasl,cyrus-sasl,cyrus-sasl"
+PACKAGECONFIG[iproute2] = "ac_cv_path_IP_PATH=/sbin/ip,ac_cv_path_IP_PATH=,iproute2,iproute2"
+PACKAGECONFIG[numactl] = "--with-numactl,--without-numactl,numactl,"
+PACKAGECONFIG[fuse] = "--with-fuse,--without-fuse,fuse,"
+PACKAGECONFIG[audit] = "--with-audit,--without-audit,audit,"
+PACKAGECONFIG[libcap-ng] = "--with-capng,--without-capng,libcap-ng,"
+
+# Enable the Python tool support
+require libvirt-python.inc
+
+do_install_append() {
+ install -d ${D}/etc/init.d
+ install -d ${D}/etc/libvirt
+
+ install -m 0755 ${WORKDIR}/libvirtd.sh ${D}/etc/init.d/libvirtd
+ install -m 0644 ${WORKDIR}/libvirtd.conf ${D}/etc/libvirt/libvirtd.conf
+
+ if ${@bb.utils.contains('DISTRO_FEATURES','sysvinit','true','false',d)}; then
+ # This will wind up in the libvirtd package, but will NOT be invoked by default.
+ #
+ mv ${D}/${libexecdir}/libvirt-guests.sh ${D}/${sysconfdir}/init.d
+ fi
+
+ if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+ # This variable is used by libvirtd.service to start libvirtd in the right mode
+ sed -i '/#LIBVIRTD_ARGS="--listen"/a LIBVIRTD_ARGS="--listen --daemon"' ${D}/${sysconfdir}/sysconfig/libvirtd
+
+ # We can't use 'notify' when we don't support 'sd_notify' dbus capabilities.
+ sed -i -e 's/Type=notify/Type=forking/' \
+ -e '/Type=forking/a PIDFile=${localstatedir}/run/libvirtd.pid' \
+ ${D}/${systemd_unitdir}/system/libvirtd.service
+ fi
+
+ # The /var/run/libvirt directories created by the Makefile
+ # are wiped out in volatile, we need to create these at boot.
+ rm -rf ${D}${localstatedir}/run
+ install -d ${D}${sysconfdir}/default/volatiles
+ echo "d root root 0755 ${localstatedir}/run/libvirt none" \
+ > ${D}${sysconfdir}/default/volatiles/99_libvirt
+ echo "d root root 0755 ${localstatedir}/run/libvirt/lockd none" \
+ >> ${D}${sysconfdir}/default/volatiles/99_libvirt
+ echo "d root root 0755 ${localstatedir}/run/libvirt/lxc none" \
+ >> ${D}${sysconfdir}/default/volatiles/99_libvirt
+ echo "d root root 0755 ${localstatedir}/run/libvirt/network none" \
+ >> ${D}${sysconfdir}/default/volatiles/99_libvirt
+ echo "d root root 0755 ${localstatedir}/run/libvirt/qemu none" \
+ >> ${D}${sysconfdir}/default/volatiles/99_libvirt
+
+ # Add hook support for libvirt
+ mkdir -p ${D}/etc/libvirt/hooks
+
+    # remove .la references to our working directory
+ for i in `find ${D}${libdir} -type f -name *.la`; do
+ sed -i -e 's#-L${B}/src/.libs##g' $i
+ done
+}
+
+EXTRA_OECONF += " \
+ --with-init-script=systemd \
+ "
+
+EXTRA_OEMAKE = "BUILD_DIR=${B} DEST_DIR=${D}${PTEST_PATH} PTEST_DIR=${PTEST_PATH}"
+
+do_compile_ptest() {
+ oe_runmake -C tests buildtest-TESTS
+}
+
+do_install_ptest() {
+ oe_runmake -C tests install-ptest
+
+ # remove .la files for ptest, they aren't required and can trigger QA errors
+ for i in `find ${D}${PTEST_PATH} -type f -name *.la`; do
+ rm -f $i
+ done
+}
+
+pkg_postinst_libvirt() {
+ if [ -z "$D" ] && [ -e /etc/init.d/populate-volatile.sh ] ; then
+ /etc/init.d/populate-volatile.sh update
+ fi
+}
+
+python () {
+ if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d):
+ d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1")
+}
diff --git a/import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb b/import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb
new file mode 100644
index 0000000..9ba5cd6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/multipath-tools/multipath-tools_git.bb
@@ -0,0 +1,46 @@
+SUMMARY = "Tools to Manage Multipathed Devices with the device-mapper"
+DESCRIPTION = "This package provides the tools to manage multipathed devices by \
+instructing the device-mapper multipath module what to do"
+
+HOMEPAGE = "http://christophe.varoqui.free.fr/"
+DEPENDS = "readline libaio lvm2 udev"
+LICENSE = "GPLv2"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=7be2873b6270e45abacc503abbe2aa3d"
+S="${WORKDIR}/git"
+
+
+SRC_URI = "git://git.opensvc.com/multipath-tools/.git;protocol=http"
+
+SRCREV = "d3683ab18b386e9b3b54b59a122c689e9ebdf5cf"
+PV = "0.4.9+gitr${SRCPV}"
+
+inherit autotools-brokensep
+
+EXTRA_OEMAKE="LIB=${libdir} exec_prefix=${exec_prefix} libdir=${libdir}"
+
+PACKAGES =+ "libmpathpersist mpathpersist kpartx libmultipath multipath multipathd libmultipath-dev libmpathpersist-dev"
+
+
+RDEPENDS_${PN} += "libmpathpersist mpathpersist kpartx libmultipath multipath multipathd udev"
+
+do_install_append () {
+ ln -sf libmpathpersist.so.0 ${D}${libdir}/libmpathpersist.so
+ ln -sf libmultipath.so.0 ${D}${libdir}/libmultipath.so
+}
+
+ALLOW_EMPTY_${PN} = "1"
+FILES_${PN} = ""
+
+FILES_libmpathpersist = "${libdir}/libmpathpersist*.so.0"
+FILES_mpathpersist = "${sbindir}/mpathpersist"
+FILES_kpartx = "${sbindir}/kpartx ${base_libdir}/udev/"
+FILES_libmultipath = "${libdir}/libcheck*.so ${libdir}/libpri*.so ${libdir}/libmultipath*.so.0"
+FILES_multipath = "${sbindir}/multipath ${sysconfdir}"
+FILES_multipathd = "${sbindir}/multipathd ${base_libdir}"
+
+# put the symbolic-link libs in the -dev packages
+FILES_libmultipath-dev = "${libdir}/libmultipath*.so"
+FILES_libmpathpersist-dev = "${libdir}/libmpathpersist*.so"
+
+
diff --git a/import-layers/meta-virtualization/recipes-extended/seabios/seabios/hostcc.patch b/import-layers/meta-virtualization/recipes-extended/seabios/seabios/hostcc.patch
new file mode 100644
index 0000000..f665e1a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/seabios/seabios/hostcc.patch
@@ -0,0 +1,21 @@
+diff -ur a/Makefile b/Makefile
+--- a/Makefile 2015-02-02 22:02:58.651041951 -0500
++++ b/Makefile 2015-02-02 23:08:13.884514003 -0500
+@@ -8,7 +8,7 @@
+ OUT=out/
+
+ # Common command definitions
+-export HOSTCC := $(CC)
++export HOSTCC ?= $(CC)
+ export CONFIG_SHELL := sh
+ export KCONFIG_AUTOHEADER := autoconf.h
+ export KCONFIG_CONFIG := $(CURDIR)/.config
+@@ -22,7 +22,7 @@
+ OBJDUMP=$(CROSS_PREFIX)objdump
+ STRIP=$(CROSS_PREFIX)strip
+ PYTHON=python
+-CPP=cpp
++CPP=$(CROSS_PREFIX)cpp
+ IASL:=iasl
+ LD32BIT_FLAG:=-melf_i386
+
diff --git a/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb b/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb
new file mode 100644
index 0000000..88f2963
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/seabios/seabios_1.9.1.bb
@@ -0,0 +1,41 @@
+DESCRIPTION = "SeaBIOS"
+HOMEPAGE = "http://www.coreboot.org/SeaBIOS"
+LICENSE = "LGPLv3"
+SECTION = "firmware"
+
+SRC_URI = " \
+ http://code.coreboot.org/p/seabios/downloads/get/${PN}-${PV}.tar.gz \
+ file://hostcc.patch \
+ "
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504 \
+ file://COPYING.LESSER;md5=6a6a8e020838b23406c81b19c1d46df6 \
+ "
+
+SRC_URI[md5sum] = "e21b6f3e54d492be837ef20d66617f81"
+SRC_URI[sha256sum] = "8a2a562fe5cfd37fe8327cdc4a60accdfe441e235b29e6999fdeb442ba98d608"
+
+FILES_${PN} = "/usr/share/firmware"
+
+DEPENDS = "util-linux-native file-native bison-native flex-native gettext-native iasl-native python-native"
+
+TUNE_CCARGS = ""
+EXTRA_OEMAKE += "HOSTCC='${BUILD_CC}'"
+EXTRA_OEMAKE += "CROSS_PREFIX=${TARGET_PREFIX}"
+
+do_configure() {
+ oe_runmake defconfig
+}
+
+do_compile() {
+ unset CPP
+ unset CPPFLAGS
+ oe_runmake
+}
+
+do_install() {
+ oe_runmake
+ install -d ${D}/usr/share/firmware
+ install -m 0644 out/bios.bin ${D}/usr/share/firmware/
+}
+
diff --git a/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb b/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb
new file mode 100644
index 0000000..0ed8bb4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/vgabios/vgabios_0.7a.bb
@@ -0,0 +1,31 @@
+DESCRIPTION = "Plex86/Bochs LGPL VGABios"
+HOMEPAGE = "http://www.nongnu.org/vgabios/"
+LICENSE = "LGPLv2.1"
+SECTION = "firmware"
+
+DEPENDS = "dev86-native"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=dcf3c825659e82539645da41a7908589"
+
+SRC_URI = "http://savannah.gnu.org/download/vgabios/${PN}-${PV}.tgz"
+
+SRC_URI[md5sum] = "2c0fe5c0ca08082a9293e3a7b23dc900"
+SRC_URI[sha256sum] = "9d24c33d4bfb7831e2069cf3644936a53ef3de21d467872b54ce2ea30881b865"
+
+PR = "r0"
+
+FILES_${PN} = "/usr/share/firmware/${PN}-${PV}*.bin"
+FILES_${PN}-dbg = "/usr/share/firmware/${PN}-${PV}*.debug.bin"
+
+S = "${WORKDIR}/${PN}-${PV}"
+
+do_configure() {
+ echo "Skip do_configure"
+}
+
+do_install() {
+ install -d ${D}/usr/share/firmware
+ install -m 0644 VGABIOS-lgpl-latest.bin ${D}/usr/share/firmware/${PN}-${PV}.bin
+ install -m 0644 VGABIOS-lgpl-latest.cirrus.bin ${D}/usr/share/firmware/${PN}-${PV}.cirrus.bin
+}
+
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen-arch.inc b/import-layers/meta-virtualization/recipes-extended/xen/xen-arch.inc
new file mode 100644
index 0000000..844d47d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen-arch.inc
@@ -0,0 +1,18 @@
+
+valid_xen_archs = " \
+ x86_64 x86_32 \
+ arm32 arm64 \
+ "
+
+def map_xen_arch(a, d):
+ import re
+ valid_archs = d.getVar('valid_xen_archs', True).split()
+
+ if re.match("i.86", a): return "x86_32"
+ elif re.match("x86.64", a): return "x86_64"
+ elif re.match("arm.*", a): return "arm32"
+ elif re.match("aarch64.*", a): return "arm64"
+ elif a in valid_archs: return a
+ else:
+ return "INVALID"
+
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen.inc b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
new file mode 100644
index 0000000..de8c168
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen.inc
@@ -0,0 +1,924 @@
+DESCRIPTION = "Xen hypervisor"
+HOMEPAGE = "http://xen.org"
+LICENSE = "GPLv2"
+SECTION = "console/tools"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=a6260c12cd5de27e80d89ae18e48d20a"
+
+COMPATIBLE_HOST = '(x86_64.*).*-linux|aarch64.*-linux'
+
+inherit autotools-brokensep gettext setuptools update-rc.d systemd deploy
+
+require xen-arch.inc
+
+PACKAGECONFIG ??= " \
+ sdl \
+ ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'systemd', '', d)} \
+ ${@bb.utils.contains('XEN_TARGET_ARCH', 'x86_64', 'hvm', '', d)} \
+ "
+
+FLASK_POLICY_FILE ?= "xenpolicy-${PV}"
+
+PACKAGECONFIG[sdl] = "--enable-sdl,--disable-sdl,virtual/libsdl,"
+PACKAGECONFIG[xsm] = "--enable-xsmpolicy,--disable-xsmpolicy,checkpolicy-native,"
+PACKAGECONFIG[systemd] = "--enable-systemd,--disable-systemd,systemd,"
+PACKAGECONFIG[hvm] = "--with-system-seabios="${STAGING_DIR_HOST}/usr/share/firmware/bios.bin",--disable-seabios,seabios ipxe vgabios,"
+
+DEPENDS = " \
+ bison-native \
+ dev86-native \
+ flex-native \
+ file-native \
+ iasl-native \
+ util-linux-native \
+ xz-native \
+ bridge-utils \
+ curl \
+ dtc \
+ gettext \
+ glib-2.0 \
+ gnutls \
+ iproute2 \
+ libnl \
+ ncurses \
+ openssl \
+ pciutils \
+ pixman \
+ procps \
+ python \
+ libaio \
+ lzo \
+ util-linux \
+ xz \
+ yajl \
+ zlib \
+ "
+
+# inherit setuptools adds python to RDEPENDS, override it
+RDEPENDS_${PN} = ""
+
+RDEPENDS_${PN}-base = "\
+ bash perl xz \
+ ${PN}-blktap \
+ ${PN}-console \
+ ${PN}-libblktapctl \
+ ${PN}-libxenguest \
+ ${PN}-libxenlight \
+ ${PN}-libxenvchan \
+ ${PN}-libxenctrl \
+ ${PN}-libxlutil \
+ ${PN}-libvhd \
+ ${PN}-libxenstat \
+ ${PN}-libxenstore \
+ ${PN}-libfsimage \
+ ${PN}-fsimage \
+ ${PN}-scripts-block \
+ ${PN}-scripts-network \
+ ${PN}-xen-watchdog \
+ ${PN}-xencommons \
+ ${PN}-xendomains \
+ ${PN}-xenstore \
+ ${PN}-xenstored \
+ ${PN}-xl \
+ "
+
+RDEPENDS_${PN}-dev = ""
+
+RRECOMMENDS_${PN}-base = " \
+ ${PN}-libblktap \
+ ${PN}-flask \
+ ${PN}-hvmloader \
+ ${PN}-xenpaging \
+ "
+
+RDEPENDS_${PN}-devd = " \
+ ${PN}-xl \
+ "
+
+RDEPENDS_${PN}-fsimage = " \
+ libext2fs \
+ "
+
+RDEPENDS_${PN}-misc = " \
+ perl \
+ python \
+ "
+
+RDEPENDS_${PN}-remus = " \
+ bash \
+ python \
+ "
+
+RDEPENDS_${PN}-scripts-block = "\
+ bash \
+ ${PN}-scripts-common \
+ ${PN}-volatiles \
+ "
+
+RDEPENDS_${PN}-scripts-common = "bash"
+
+RDEPENDS_${PN}-scripts-network = "\
+ bash \
+ bridge-utils \
+ ${PN}-scripts-common \
+ ${PN}-volatiles \
+ "
+
+RDEPENDS_${PN}-xencommons = "\
+ bash \
+ ${PN}-console \
+ ${PN}-xenstore \
+ ${PN}-xenstored \
+ ${PN}-xl \
+ ${PN}-scripts-common \
+ "
+
+RDEPENDS_${PN}-xendomains = "\
+ bash \
+ ${PN}-console \
+ ${PN}-scripts-block \
+ ${PN}-scripts-common \
+ ${PN}-xenstored \
+ "
+
+RDEPENDS_${PN}-xl = "libgcc"
+
+RDEPENDS_${PN}-xentrace = "python"
+
+RDEPENDS_${PN}-xen-watchdog = "bash"
+
+PACKAGES = "\
+ ${PN}-base \
+ ${PN}-blktap \
+ ${PN}-console \
+ ${PN}-dbg \
+ ${PN}-dev \
+ ${PN}-devd \
+ ${PN}-doc \
+ ${PN}-efi \
+ ${PN}-flask \
+ ${PN}-flask-tools \
+ ${PN}-fsimage \
+ ${PN}-gdbsx \
+ ${PN}-hvmloader \
+ ${PN}-hypervisor \
+ ${PN}-init-xenstore-dom \
+ ${PN}-kdd \
+ ${PN}-libblktap \
+ ${PN}-libblktapctl \
+ ${PN}-libblktapctl-dev \
+ ${PN}-libblktap-dev \
+ ${PN}-libfsimage \
+ ${PN}-libfsimage-dev \
+ ${PN}-libvhd \
+ ${PN}-libvhd-dev \
+ ${PN}-libxencall \
+ ${PN}-libxencall-dev \
+ ${PN}-libxenctrl \
+ ${PN}-libxenctrl-dev \
+ ${PN}-libxenevtchn \
+ ${PN}-libxenevtchn-dev \
+ ${PN}-libxenforeignmemory \
+ ${PN}-libxenforeignmemory-dev \
+ ${PN}-libxengnttab \
+ ${PN}-libxengnttab-dev \
+ ${PN}-libxenguest \
+ ${PN}-libxenguest-dev \
+ ${PN}-libxenlight \
+ ${PN}-libxenlight-dev \
+ ${PN}-libxenstat \
+ ${PN}-libxenstat-dev \
+ ${PN}-libxenstore \
+ ${PN}-libxenstore-dev \
+ ${PN}-libxentoollog \
+ ${PN}-libxentoollog-dev \
+ ${PN}-libxenvchan \
+ ${PN}-libxenvchan-dev \
+ ${PN}-libxlutil \
+ ${PN}-libxlutil-dev \
+ ${PN}-misc \
+ ${PN}-pygrub \
+ ${PN}-python \
+ ${PN}-qemu \
+ ${PN}-remus \
+ ${PN}-scripts-block \
+ ${PN}-scripts-common \
+ ${PN}-scripts-network \
+ ${PN}-staticdev \
+ ${PN}-volatiles \
+ ${PN}-xcutils \
+ ${PN}-xencommons \
+ ${PN}-xend \
+ ${PN}-xend-examples \
+ ${PN}-xendomains \
+ ${PN}-xenmon \
+ ${PN}-xenpaging \
+ ${PN}-xenpmd \
+ ${PN}-xenstat \
+ ${PN}-xenstore \
+ ${PN}-xenstored \
+ ${PN}-xentrace \
+ ${PN}-xen-watchdog \
+ ${PN}-xl \
+ ${PN}-xl-examples \
+ ${PN}-xm \
+ ${PN}-xm-examples \
+ "
+
+FILES_${PN}-dbg += "\
+ ${libdir}/.debug \
+ ${libdir}/debug/* \
+ ${libdir}/xen/bin/.debug \
+ ${libdir}/python2.7/site-packages/.debug \
+ ${libdir}/python2.7/site-packages/xen/lowlevel/.debug \
+ ${libdir}/fs/xfs/.debug \
+ ${libdir}/fs/ufs/.debug \
+ ${libdir}/fs/ext2fs-lib/.debug \
+ ${libdir}/fs/fat/.debug \
+ ${libdir}/fs/zfs/.debug \
+ ${libdir}/fs/reiserfs/.debug \
+ ${libdir}/fs/iso9660/.debug \
+ ${libdir}/fs/*/.debug \
+ ${sbindir}/.debug \
+ ${libdir}exec/.debug \
+ ${libdir}/xen/libexec/.debug \
+ ${bindir}/.debug \
+ ${libdir}/python2.7/dist-packages/.debug \
+ ${libdir}/python2.7/dist-packages/xen/lowlevel/.debug \
+ "
+
+FILES_${PN}-dev = "\
+ ${includedir} \
+ "
+
+FILES_${PN}-doc = "\
+ ${sysconfdir}/xen/README \
+ ${sysconfdir}/xen/README.incompatibilities \
+ ${datadir}/doc \
+ ${datadir}/man \
+ "
+
+FILES_${PN}-staticdev += "\
+ ${libdir}/libblktapctl.a \
+ ${libdir}/libxenguest.a \
+ ${libdir}/libxenlight.a \
+ ${libdir}/libxenvchan.a \
+ ${libdir}/libxenctrl.a \
+ ${libdir}/libxlutil.a \
+ ${libdir}/libvhd.a \
+ ${libdir}/libxenstat.a \
+ ${libdir}/libxenstore.a \
+ ${libdir}/libblktap.a \
+ "
+
+FILES_${PN}-libblktapctl = "${libdir}/libblktapctl.so.*"
+FILES_${PN}-libblktapctl-dev = "${libdir}/libblktapctl.so"
+
+FILES_${PN}-libxencall = "${libdir}/libxencall.so.*"
+FILES_${PN}-libxencall-dev = "${libdir}/libxencall.so"
+
+FILES_${PN}-libxenctrl = "${libdir}/libxenctrl.so.*"
+FILES_${PN}-libxenctrl-dev = "${libdir}/libxenctrl.so"
+
+FILES_${PN}-libxenevtchn = "${libdir}/libxenevtchn.so.*"
+FILES_${PN}-libxenevtchn-dev = "${libdir}/libxenevtchn.so"
+
+FILES_${PN}-libxenforeignmemory = "${libdir}/libxenforeignmemory.so.*"
+FILES_${PN}-libxenforeignmemory-dev = "${libdir}/libxenforeignmemory.so"
+
+FILES_${PN}-libxengnttab = "${libdir}/libxengnttab.so.*"
+FILES_${PN}-libxengnttab-dev = "${libdir}/libxengnttab.so"
+
+FILES_${PN}-libxenguest = "${libdir}/libxenguest.so.*"
+FILES_${PN}-libxenguest-dev = "${libdir}/libxenguest.so"
+
+FILES_${PN}-libxenlight = "${libdir}/libxenlight.so.*"
+FILES_${PN}-libxenlight-dev = " \
+ ${libdir}/libxenlight.so \
+ ${datadir}/pkgconfig/xenlight.pc \
+ "
+
+FILES_${PN}-libxenstat = "${libdir}/libxenstat.so.*"
+FILES_${PN}-libxenstat-dev = "${libdir}/libxenstat.so"
+
+FILES_${PN}-libxenstore = "${libdir}/libxenstore.so.*"
+FILES_${PN}-libxenstore-dev = "${libdir}/libxenstore.so"
+
+FILES_${PN}-libxentoollog = "${libdir}/libxentoollog.so.*"
+FILES_${PN}-libxentoollog-dev = "${libdir}/libxentoollog.so"
+
+FILES_${PN}-libxenvchan = "${libdir}/libxenvchan.so.*"
+FILES_${PN}-libxenvchan-dev = "${libdir}/libxenvchan.so"
+
+FILES_${PN}-libxlutil = "${libdir}/libxlutil.so.*"
+FILES_${PN}-libxlutil-dev = " \
+ ${libdir}/libxlutil.so \
+ ${datadir}/pkgconfig/xlutil.pc \
+ "
+FILES_${PN}-libvhd = "${libdir}/libvhd.so.*"
+FILES_${PN}-libvhd-dev = "${libdir}/libvhd.so"
+
+FILES_${PN}-libblktap = "${libdir}/libblktap.so.*"
+FILES_${PN}-libblktap-dev = "${libdir}/libblktap.so"
+
+FILES_${PN}-libfsimage = "${libdir}/libfsimage.so.*"
+FILES_${PN}-libfsimage-dev = "${libdir}/libfsimage.so"
+
+FILES_${PN}-fsimage = "${libdir}/fs/*/*fsimage.so"
+
+FILES_${PN}-hypervisor = "\
+ /boot/xen-* \
+ /boot/xen \
+ /boot/xen-*.gz \
+ /boot/xen.gz \
+ /boot/xen-syms-* \
+ "
+
+FILES_${PN}-init-xenstore-dom = "${libdir}/xen/bin/init-xenstore-domain"
+
+FILES_${PN}-efi = "\
+ ${exec_prefix}/lib64 \
+ ${exec_prefix}/lib64/xen* \
+ "
+
+FILES_${PN}-base = "\
+ ${sysconfdir}/xen/auto \
+ ${sysconfdir}/xen/cpupool \
+ ${localstatedir}/xen/dump \
+ "
+
+FILES_${PN}-blktap = "\
+ ${sbindir}/blktapctrl \
+ ${sbindir}/img2qcow \
+ ${sbindir}/lock-util \
+ ${sbindir}/qcow2raw \
+ ${sbindir}/qcow-create \
+ ${sbindir}/tap-ctl \
+ ${sbindir}/tapdisk \
+ ${sbindir}/tapdisk2 \
+ ${sbindir}/tapdisk-client \
+ ${sbindir}/tapdisk-diff \
+ ${sbindir}/tapdisk-stream \
+ ${sbindir}/td-util \
+ ${sbindir}/vhd-update \
+ ${sbindir}/vhd-util \
+ "
+
+FILES_${PN}-console = "\
+ ${libdir}/xen/bin/xenconsole \
+ ${sbindir}/xenconsoled \
+ "
+
+FILES_${PN}-devd = "\
+ ${sysconfdir}/init.d/xendriverdomain \
+ "
+
+FILES_${PN}-flask = "\
+ /boot/${FLASK_POLICY_FILE} \
+ "
+
+FILES_${PN}-flask-tools = "\
+ ${sbindir}/flask-get-bool \
+ ${sbindir}/flask-getenforce \
+ ${sbindir}/flask-label-pci \
+ ${sbindir}/flask-loadpolicy \
+ ${sbindir}/flask-set-bool \
+ ${sbindir}/flask-setenforce \
+ "
+
+FILES_${PN}-gdbsx = "\
+ ${sbindir}/gdbsx \
+ "
+
+INSANE_SKIP_${PN}-hvmloader = "arch"
+FILES_${PN}-hvmloader = "\
+ ${libdir}/xen/boot/hvmloader \
+ "
+
+FILES_${PN}-kdd = "\
+ ${sbindir}/kdd \
+ "
+
+FILES_${PN}-misc = "\
+ ${bindir}/xencons \
+ ${bindir}/xencov_split \
+ ${bindir}/xen-detect \
+ ${libdir}/xen/bin/xenpvnetboot \
+ ${sbindir}/gtracestat \
+ ${sbindir}/gtraceview \
+ ${sbindir}/xen-bugtool \
+ ${sbindir}/xencov \
+ ${sbindir}/xenperf \
+ ${sbindir}/xenpm \
+ ${sbindir}/xsview \
+ ${sbindir}/xen-tmem-list-parse \
+ ${sbindir}/xen-python-path \
+ ${sbindir}/xen-ringwatch \
+ ${sbindir}/xen-hptool \
+ ${sbindir}/xen-hvmcrash \
+ ${sbindir}/xen-hvmctx \
+ ${sbindir}/xenlockprof \
+ ${sbindir}/xen-lowmemd \
+ ${sbindir}/xen-mfndump \
+ ${libdir}/xen/bin/verify-stream-v2 \
+ ${libdir}/xen/bin/convert-legacy-stream \
+ "
+
+FILES_${PN}-pygrub = "\
+ ${bindir}/pygrub \
+ ${libdir}/xen/bin/pygrub \
+ "
+
+FILES_${PN}-python = "\
+ ${libdir}/python2.7 \
+ "
+
+INSANE_SKIP_${PN} = "already-stripped"
+INSANE_SKIP_${PN}-qemu = "arch"
+FILES_${PN}-qemu = " \
+ ${datadir}/xen/qemu \
+ ${libdir}/xen/bin/qemu-system-i386 \
+ ${libdir}/xen/bin/qemu-system-x86_64 \
+ ${libdir}/xen/bin/qemu-img \
+ ${libdir}/xen/bin/qemu-nbd \
+ ${libdir}/xen/bin/qemu-ga \
+ ${libdir}/xen/bin/qemu-io \
+ ${libdir}/xen/bin/qemu-dm \
+ ${libdir}/xen/bin/virtfs-proxy-helper \
+ /usr/libexec/qemu-bridge-helper \
+ ${libdir}/xen/libexec/qemu-bridge-helper \
+ /usr/etc/qemu \
+ /usr/etc/qemu/target-x86_64.conf \
+ ${libdir}/xen/etc/qemu/target-x86_64.conf \
+ ${datadir}/qemu-xen \
+ ${datadir}/qemu-xen/qemu \
+ ${datadir}/qemu-xen/qemu/bamboo.dtb \
+ ${datadir}/qemu-xen/qemu/pxe-pcnet.rom \
+ ${datadir}/qemu-xen/qemu/vgabios-vmware.bin \
+ ${datadir}/qemu-xen/qemu/pxe-eepro100.rom \
+ ${datadir}/qemu-xen/qemu/pxe-e1000.rom \
+ ${datadir}/qemu-xen/qemu/openbios-ppc \
+ ${datadir}/qemu-xen/qemu/multiboot.bin \
+ ${datadir}/qemu-xen/qemu/vgabios-cirrus.bin \
+ ${datadir}/qemu-xen/qemu/bios.bin \
+ ${datadir}/qemu-xen/qemu/vgabios-stdvga.bin \
+ ${datadir}/qemu-xen/qemu/palcode-clipper \
+ ${datadir}/qemu-xen/qemu/pxe-ne2k_pci.rom \
+ ${datadir}/qemu-xen/qemu/spapr-rtas.bin \
+ ${datadir}/qemu-xen/qemu/slof.bin \
+ ${datadir}/qemu-xen/qemu/vgabios-qxl.bin \
+ ${datadir}/qemu-xen/qemu/pxe-rtl8139.rom \
+ ${datadir}/qemu-xen/qemu/openbios-sparc64 \
+ ${datadir}/qemu-xen/qemu/pxe-virtio.rom \
+ ${datadir}/qemu-xen/qemu/kvmvapic.bin \
+ ${datadir}/qemu-xen/qemu/openbios-sparc32 \
+ ${datadir}/qemu-xen/qemu/petalogix-s3adsp1800.dtb \
+ ${datadir}/qemu-xen/qemu/sgabios.bin \
+ ${datadir}/qemu-xen/qemu/linuxboot.bin \
+ ${datadir}/qemu-xen/qemu/qemu-icon.bmp \
+ ${datadir}/qemu-xen/qemu/ppc_rom.bin \
+ ${datadir}/qemu-xen/qemu/vgabios.bin \
+ ${datadir}/qemu-xen/qemu/s390-zipl.rom \
+ ${datadir}/qemu-xen/qemu/petalogix-ml605.dtb \
+ ${datadir}/qemu-xen/qemu/keymaps \
+ ${datadir}/qemu-xen/qemu/keymaps/common \
+ ${datadir}/qemu-xen/qemu/keymaps/th \
+ ${datadir}/qemu-xen/qemu/keymaps/is \
+ ${datadir}/qemu-xen/qemu/keymaps/en-gb \
+ ${datadir}/qemu-xen/qemu/keymaps/ar \
+ ${datadir}/qemu-xen/qemu/keymaps/fr-be \
+ ${datadir}/qemu-xen/qemu/keymaps/ru \
+ ${datadir}/qemu-xen/qemu/keymaps/hu \
+ ${datadir}/qemu-xen/qemu/keymaps/de-ch \
+ ${datadir}/qemu-xen/qemu/keymaps/no \
+ ${datadir}/qemu-xen/qemu/keymaps/fr \
+ ${datadir}/qemu-xen/qemu/keymaps/pl \
+ ${datadir}/qemu-xen/qemu/keymaps/fr-ca \
+ ${datadir}/qemu-xen/qemu/keymaps/de \
+ ${datadir}/qemu-xen/qemu/keymaps/fr-ch \
+ ${datadir}/qemu-xen/qemu/keymaps/bepo \
+ ${datadir}/qemu-xen/qemu/keymaps/lv \
+ ${datadir}/qemu-xen/qemu/keymaps/ja \
+ ${datadir}/qemu-xen/qemu/keymaps/da \
+ ${datadir}/qemu-xen/qemu/keymaps/lt \
+ ${datadir}/qemu-xen/qemu/keymaps/hr \
+ ${datadir}/qemu-xen/qemu/keymaps/es \
+ ${datadir}/qemu-xen/qemu/keymaps/modifiers \
+ ${datadir}/qemu-xen/qemu/keymaps/sl \
+ ${datadir}/qemu-xen/qemu/keymaps/it \
+ ${datadir}/qemu-xen/qemu/keymaps/nl \
+ ${datadir}/qemu-xen/qemu/keymaps/fo \
+ ${datadir}/qemu-xen/qemu/keymaps/mk \
+ ${datadir}/qemu-xen/qemu/keymaps/pt-br \
+ ${datadir}/qemu-xen/qemu/keymaps/tr \
+ ${datadir}/qemu-xen/qemu/keymaps/sv \
+ ${datadir}/qemu-xen/qemu/keymaps/fi \
+ ${datadir}/qemu-xen/qemu/keymaps/en-us \
+ ${datadir}/qemu-xen/qemu/keymaps/et \
+ ${datadir}/qemu-xen/qemu/keymaps/nl-be \
+ ${datadir}/qemu-xen/qemu/keymaps/pt \
+ ${bindir}/qemu-nbd-xen \
+ ${bindir}/qemu-img-xen \
+ "
+
+FILES_${PN}-remus = "\
+ ${bindir}/remus \
+ ${sysconfdir}/xen/scripts/remus-netbuf-setup \
+ "
+
+FILES_${PN}-scripts-network = " \
+ ${sysconfdir}/xen/scripts/network-bridge \
+ ${sysconfdir}/xen/scripts/network-nat \
+ ${sysconfdir}/xen/scripts/network-route \
+ ${sysconfdir}/xen/scripts/qemu-ifup \
+ ${sysconfdir}/xen/scripts/vif2 \
+ ${sysconfdir}/xen/scripts/vif-bridge \
+ ${sysconfdir}/xen/scripts/vif-common.sh \
+ ${sysconfdir}/xen/scripts/vif-nat \
+ ${sysconfdir}/xen/scripts/vif-openvswitch \
+ ${sysconfdir}/xen/scripts/vif-route \
+ ${sysconfdir}/xen/scripts/vif-setup \
+ "
+
+FILES_${PN}-scripts-block = " \
+ ${sysconfdir}/xen/scripts/blktap \
+ ${sysconfdir}/xen/scripts/block \
+ ${sysconfdir}/xen/scripts/block-common.sh \
+ ${sysconfdir}/xen/scripts/block-enbd \
+ ${sysconfdir}/xen/scripts/block-iscsi \
+ ${sysconfdir}/xen/scripts/block-nbd \
+ ${sysconfdir}/xen/scripts/block-drbd-probe \
+ ${sysconfdir}/xen/scripts/block-tap \
+ ${sysconfdir}/xen/scripts/vscsi \
+ "
+
+FILES_${PN}-scripts-common = " \
+ ${sysconfdir}/xen/scripts/external-device-migrate \
+ ${sysconfdir}/xen/scripts/hotplugpath.sh \
+ ${sysconfdir}/xen/scripts/locking.sh \
+ ${sysconfdir}/xen/scripts/logging.sh \
+ ${sysconfdir}/xen/scripts/xen-hotplug-cleanup \
+ ${sysconfdir}/xen/scripts/xen-hotplug-common.sh \
+ ${sysconfdir}/xen/scripts/xen-network-common.sh \
+ ${sysconfdir}/xen/scripts/xen-script-common.sh \
+ "
+
+FILES_${PN}-volatiles = "\
+ ${sysconfdir}/default/volatiles/99_xen \
+ ${sysconfdir}/tmpfiles.d/xen.conf \
+ "
+
+FILES_${PN}-xcutils = "\
+ ${libdir}/xen/bin/lsevtchn \
+ ${libdir}/xen/bin/readnotes \
+ ${libdir}/xen/bin/xc_restore \
+ ${libdir}/xen/bin/xc_save \
+ "
+
+FILES_${PN}-xend-examples = "\
+ ${sysconfdir}/xen/xend-config.sxp \
+ ${sysconfdir}/xen/xend-pci-permissive.sxp \
+ ${sysconfdir}/xen/xend-pci-quirks.sxp \
+ "
+
+FILES_${PN}-xenpaging = "\
+ ${libdir}/xen/bin/xenpaging \
+ ${localstatedir}/lib/xen/xenpaging \
+ "
+
+FILES_${PN}-xenpmd = "\
+ ${sbindir}/xenpmd \
+ "
+
+FILES_${PN}-xenstat = "\
+ ${sbindir}/xentop \
+ "
+
+FILES_${PN}-xenstore = "\
+ ${bindir}/xenstore \
+ ${bindir}/xenstore-chmod \
+ ${bindir}/xenstore-control \
+ ${bindir}/xenstore-exists \
+ ${bindir}/xenstore-list \
+ ${bindir}/xenstore-ls \
+ ${bindir}/xenstore-read \
+ ${bindir}/xenstore-rm \
+ ${bindir}/xenstore-watch \
+ ${bindir}/xenstore-write \
+ "
+
+FILES_${PN}-xenstored = "\
+ ${sbindir}/xenstored \
+ ${localstatedir}/lib/xenstored \
+ "
+
+FILES_${PN}-xentrace = "\
+ ${bindir}/xentrace \
+ ${bindir}/xentrace_format \
+ ${bindir}/xentrace_setsize \
+ ${libdir}/xen/bin/xenctx \
+ ${bindir}/xenalyze \
+ ${sbindir}/xentrace \
+ ${sbindir}/xentrace_setsize \
+ "
+
+FILES_${PN}-xen-watchdog = "\
+ ${sbindir}/xenwatchdogd \
+ ${sysconfdir}/init.d/xen-watchdog \
+ ${systemd_unitdir}/system/xen-watchdog.service \
+ "
+
+FILES_${PN}-xl = "\
+ ${sysconfdir}/bash_completion.d/xl.sh \
+ ${sysconfdir}/xen/xl.conf \
+ ${libdir}/xen/bin/libxl-save-helper \
+ ${sbindir}/xl \
+ ${libdir}/xen/bin/xen-init-dom0 \
+ "
+
+FILES_${PN}-xl-examples = "\
+ ${sysconfdir}/xen/xlexample.hvm \
+ ${sysconfdir}/xen/xlexample.pvlinux \
+ "
+
+FILES_${PN}-xm-examples = "\
+ ${sysconfdir}/xen/xmexample1 \
+ ${sysconfdir}/xen/xmexample2 \
+ ${sysconfdir}/xen/xmexample3 \
+ ${sysconfdir}/xen/xmexample.hvm \
+ ${sysconfdir}/xen/xmexample.hvm-stubdom \
+ ${sysconfdir}/xen/xmexample.nbd \
+ ${sysconfdir}/xen/xmexample.pv-grub \
+ ${sysconfdir}/xen/xmexample.vti \
+ "
+
+FILES_${PN}-xenmon = "\
+ ${sbindir}/xenbaked \
+ ${sbindir}/xentrace_setmask \
+ ${sbindir}/xenmon.py \
+ "
+
+FILES_${PN}-xm = "\
+ ${sysconfdir}/xen/xm-config.xml \
+ ${datadir}/xen/create.dtd \
+ ${sbindir}/xm \
+ "
+
+FILES_${PN}-xencommons += "\
+ ${sysconfdir}/default/xencommons \
+ ${sysconfdir}/init.d/xencommons \
+ ${systemd_unitdir}/modules-load.d/xen.conf \
+ ${systemd_unitdir}/system/proc-xen.mount \
+ ${systemd_unitdir}/system/xen-qemu-dom0-disk-backend.service \
+ ${systemd_unitdir}/system/xenconsoled.service \
+ ${systemd_unitdir}/system/xen-init-dom0.service \
+ ${systemd_unitdir}/system/xenstored.service \
+ ${systemd_unitdir}/system/xenstored.socket \
+ ${systemd_unitdir}/system/xenstored_ro.socket \
+ ${systemd_unitdir}/system/var-lib-xenstored.mount \
+ "
+
+FILES_${PN}-xend += " \
+ ${sysconfdir}/init.d/xend \
+ ${sbindir}/xend \
+ "
+
+FILES_${PN}-xendomains += "\
+ ${libdir}/xen/bin/xendomains \
+ ${sysconfdir}/default/xendomains \
+ ${sysconfdir}/init.d/xendomains \
+ ${sysconfdir}/sysconfig/xendomains \
+ ${systemd_unitdir}/system/xendomains.service \
+ "
+
+# configure init.d scripts
+INITSCRIPT_PACKAGES = "${PN}-xend ${PN}-xencommons ${PN}-xen-watchdog ${PN}-xendomains ${PN}-devd"
+INITSCRIPT_NAME_${PN}-xencommons = "xencommons"
+INITSCRIPT_PARAMS_${PN}-xencommons = "defaults 80"
+INITSCRIPT_NAME_${PN}-xen-watchdog = "xen-watchdog"
+INITSCRIPT_PARAMS_${PN}-xen-watchdog = "defaults 81"
+INITSCRIPT_NAME_${PN}-xend = "xend"
+INITSCRIPT_PARAMS_${PN}-xend = "defaults 82"
+INITSCRIPT_NAME_${PN}-xendomains = "xendomains"
+INITSCRIPT_PARAMS_${PN}-xendomains = "defaults 83"
+INITSCRIPT_NAME_${PN}-devd = "xendriverdomain"
+INITSCRIPT_PARAMS_${PN}-devd = "defaults 82"
+
+# systemd packages
+SYSTEMD_PACKAGES = "${PN}-xen-watchdog ${PN}-xencommons ${PN}-xendomains"
+SYSTEMD_SERVICE_${PN}-xen-watchdog = "xen-watchdog.service"
+SYSTEMD_SERVICE_${PN}-xencommons = " \
+    proc-xen.mount \
+    var-lib-xenstored.mount \
+    xen-qemu-dom0-disk-backend.service \
+    xenconsoled.service \
+    xen-init-dom0.service \
+    xenstored.service \
+    xenstored.socket \
+    xenstored_ro.socket \
+    "
+SYSTEMD_SERVICE_${PN}-xendomains = "xendomains.service"
+
+#### REQUIRED ENVIRONMENT VARIABLES ####
+export BUILD_SYS
+export HOST_SYS
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+# specify xen hypervisor to build/target
+export XEN_TARGET_ARCH = "${@map_xen_arch(d.getVar('TARGET_ARCH', True), d)}"
+export XEN_COMPILE_ARCH = "${@map_xen_arch(d.getVar('BUILD_ARCH', True), d)}"
+
+python () {
+ if d.getVar('XEN_TARGET_ARCH', True) == 'INVALID':
+ raise bb.parse.SkipPackage('Cannot map `%s` to a xen architecture' % d.getVar('TARGET_ARCH', True))
+}
+
+# Yocto appends ${PN} to libexecdir by default and Xen appends 'xen' as well
+# the result is a nested xen/xen/ so let's avoid that by shunning Yocto's
+# extra ${PN} appended.
+libexecdir = "${libdir}"
+
+# hardcoded as Linux, as the only compatible hosts are Linux.
+export XEN_OS = "Linux"
+
+# this is used for the header (#!${bindir}/python) of the install python scripts
+export PYTHONPATH="${bindir}/python"
+
+# seabios forcefully sets HOSTCC to CC - fixup to allow it to build native conf executable
+export HOSTCC="${BUILD_CC}"
+
+# make xen requires CROSS_COMPILE set by hand as it does not abide by ./configure
+export CROSS_COMPILE="${TARGET_PREFIX}"
+
+# override LDFLAGS to allow xen to build without: "x86_64-oe-linux-ld: unrecognized option '-Wl,-O1'"
+export LDFLAGS=""
+
+# Yocto injects -mfpmath=sse for some machine types into the CFLAGS which
+# conflicts with -mno-sse so instead we strip -mfpmath=sse instead of
+# patching the build to be ok with this
+TUNE_CCARGS := "${@oe_filter_out('-mfpmath=sse', '${TUNE_CCARGS}', d)}"
+
+EXTRA_OECONF += " \
+ --exec-prefix=/usr \
+ --prefix=/usr \
+ --host=${HOST_SYS} \
+ --with-systemd=${systemd_unitdir}/system \
+ --with-systemd-modules-load=${systemd_unitdir}/modules-load.d \
+ --disable-stubdom \
+ --disable-ioemu-stubdom \
+ --disable-pv-grub \
+ --disable-xenstore-stubdom \
+ --disable-rombios \
+ --disable-ocamltools \
+ --with-initddir=${INIT_D_DIR} \
+ --with-sysconfig-leaf-dir=default \
+ --with-system-qemu=/usr/bin/qemu-system-i386 \
+ --disable-qemu-traditional \
+ "
+
+EXTRA_OEMAKE += "STDVGA_ROM=${STAGING_DIR_HOST}/usr/share/firmware/vgabios-0.7a.bin"
+EXTRA_OEMAKE += "CIRRUSVGA_ROM=${STAGING_DIR_HOST}/usr/share/firmware/vgabios-0.7a.cirrus.bin"
+EXTRA_OEMAKE += "SEABIOS_ROM=${STAGING_DIR_HOST}/usr/share/firmware/bios.bin"
+EXTRA_OEMAKE += "ETHERBOOT_ROMS=${STAGING_DIR_HOST}/usr/share/firmware/rtl8139.rom"
+
+# prevent the Xen build scripts from fetching things during the build
+# all dependencies should be reflected in the Yocto recipe
+EXTRA_OEMAKE += "WGET=/bin/false"
+EXTRA_OEMAKE += "GIT=/bin/false"
+
+# check for XSM in package config to allow XSM_ENABLE to be set
+python () {
+ pkgconfig = d.getVar('PACKAGECONFIG', True)
+ if ('xsm') in pkgconfig.split():
+ d.setVar('XSM_ENABLED', '1')
+ else:
+ d.setVar('XSM_ENABLED', '0')
+}
+
+do_post_patch() {
+	# fixup LD/CC/CPP/CXX variable assignments within StdGNU.mk
+ for i in LD CC CPP CXX; do
+ sed -i "s/^\($i\s\s*\).*=/\1?=/" ${S}/config/StdGNU.mk
+ done
+ # fixup environment passing in some makefiles
+ sed -i 's#\(\w*\)=\(\$.\w*.\)#\1="\2"#' ${S}/tools/firmware/Makefile
+
+ # libsystemd-daemon -> libsystemd for newer systemd versions
+ sed -i 's#libsystemd-daemon#libsystemd#' ${S}/tools/configure
+}
+
+addtask post_patch after do_patch before do_configure
+
+do_stubs() {
+ # no stubs-32.h in our 64-bit sysroot - hack it into tools/include/gnu
+ if ! test -f ${STAGING_DIR_TARGET}/usr/include/gnu/stubs-32.h ; then
+ if test -f ${STAGING_DIR_TARGET}/usr/include/gnu/stubs-64.h ; then
+ test -d ${S}/tools/include/gnu || mkdir ${S}/tools/include/gnu
+ cat ${STAGING_DIR_TARGET}/usr/include/gnu/stubs-64.h | grep -v stub_bdflush | grep -v stub_getmsg | grep -v stub_putmsg > ${S}/tools/include/gnu/stubs-32.h
+ echo \#define __stub___kernel_cosl >> ${S}/tools/include/gnu/stubs-32.h
+ echo \#define __stub___kernel_sinl >> ${S}/tools/include/gnu/stubs-32.h
+ echo \#define __stub___kernel_tanl >> ${S}/tools/include/gnu/stubs-32.h
+ fi
+ fi
+}
+
+addtask stubs after do_configure before do_compile
+
+do_configure() {
+
+ #./configure --enable-xsmpolicy does not set XSM_ENABLE must be done manually
+ if [ "${XSM_ENABLED}" = "1" ]; then
+ echo "XSM_ENABLE := y" > ${S}/.config
+ fi
+
+ # do configure
+ oe_runconf
+}
+
+do_compile() {
+ oe_runmake
+}
+
+do_install() {
+ oe_runmake DESTDIR="${D}" install
+
+ # remove installed volatiles
+ rm -rf ${D}${localstatedir}/run \
+ ${D}${localstatedir}/lock \
+ ${D}${localstatedir}/log \
+ ${D}${localstatedir}/volatile \
+ ${D}${localstatedir}/lib/xen
+
+ VOLATILE_DIRS=" \
+ ${localstatedir}/run/xenstored \
+ ${localstatedir}/run/xend \
+ ${localstatedir}/run/xend/boot \
+ ${localstatedir}/run/xen \
+ ${localstatedir}/log/xen \
+ ${localstatedir}/lock/xen \
+ ${localstatedir}/lock/subsys \
+ ${localstatedir}/lib/xen \
+ "
+
+ # install volatiles using populate_volatiles mechanism
+ install -d ${D}${sysconfdir}/default/volatiles
+ for i in $VOLATILE_DIRS; do
+ echo "d root root 0755 $i none" >> ${D}${sysconfdir}/default/volatiles/99_xen
+ done
+
+	# workaround for the xendomains script, which searches sysconfig if the directory exists
+ install -d ${D}${sysconfdir}/sysconfig
+ ln -sf ${sysconfdir}/default/xendomains ${D}${sysconfdir}/sysconfig/xendomains
+
+ # systemd
+ if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then
+ # install volatiles using systemd tmpfiles.d
+ install -d ${D}${sysconfdir}/tmpfiles.d
+ for i in $VOLATILE_DIRS; do
+ echo "d $i 0755 root root - -" >> ${D}${sysconfdir}/tmpfiles.d/xen.conf
+ done
+ fi
+
+ # fixup default path to qemu-system-i386
+ sed -i 's#\(test -z "$QEMU_XEN" && QEMU_XEN=\).*$#\1"/usr/bin/qemu-system-i386"#' ${D}/etc/init.d/xencommons
+
+ if [ -e ${D}${systemd_unitdir}/system/xen-qemu-dom0-disk-backend.service ]; then
+ sed -i 's#ExecStart=.*qemu-system-i386\(.*\)$#ExecStart=/usr/bin/qemu-system-i386\1#' \
+ ${D}${systemd_unitdir}/system/xen-qemu-dom0-disk-backend.service
+ fi
+}
+
+pkg_postinst_${PN}-volatiles() {
+ if [ -z "$D" ]; then
+ if command -v systemd-tmpfiles >/dev/null; then
+ systemd-tmpfiles --create ${sysconfdir}/tmpfiles.d/xen.conf
+ elif [ -e ${sysconfdir}/init.d/populate-volatile.sh ]; then
+ ${sysconfdir}/init.d/populate-volatile.sh update
+ fi
+ fi
+}
+
+do_deploy() {
+ install -d ${DEPLOYDIR}
+
+ if [ -f ${D}/boot/xen ]; then
+ install -m 0644 ${D}/boot/xen ${DEPLOYDIR}/xen-${MACHINE}
+ fi
+
+ if [ -f ${D}/boot/xen.gz ]; then
+ install -m 0644 ${D}/boot/xen.gz ${DEPLOYDIR}/xen-${MACHINE}.gz
+ fi
+
+ if [ -f ${D}/usr/lib64/efi/xen.efi ]; then
+ install -m 0644 ${D}/usr/lib64/efi/xen.efi ${DEPLOYDIR}/xen-${MACHINE}.efi
+ fi
+
+ # Install the flask policy in the deploy directory if it exists
+ if [ -f ${D}/boot/${FLASK_POLICY_FILE} ]; then
+ install -m 0644 ${D}/boot/${FLASK_POLICY_FILE} ${DEPLOYDIR}
+ ln -sf ${FLASK_POLICY_FILE} ${DEPLOYDIR}/xenpolicy-${MACHINE}
+ fi
+}
+
+addtask deploy after do_populate_sysroot
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb
new file mode 100644
index 0000000..0adf8ad
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_4.6.1.bb
@@ -0,0 +1,10 @@
+require xen.inc
+
+SRC_URI = " \
+ http://bits.xensource.com/oss-xen/release/${PV}/xen-${PV}.tar.gz \
+ "
+
+SRC_URI[md5sum] = "df2d854c3c90ffeefaf71e7f868fb326"
+SRC_URI[sha256sum] = "44cc2fccba1e147ef4c8da0584ce0f24189c8743de0e3e9a9226da88ddb5f589"
+
+S = "${WORKDIR}/xen-${PV}"
diff --git a/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb b/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb
new file mode 100644
index 0000000..a86a501
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-extended/xen/xen_git.bb
@@ -0,0 +1,15 @@
+require xen.inc
+
+SRCREV = "1fd615aa0108490ffc558d27627f509183cbfdaf"
+
+XEN_REL="4.6"
+
+PV = "${XEN_REL}.0+git${SRCPV}"
+
+S = "${WORKDIR}/git"
+
+SRC_URI = " \
+ git://xenbits.xen.org/xen.git;branch=staging-${XEN_REL} \
+ "
+
+DEFAULT_PREFERENCE = "-1"
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/ebtables.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/ebtables.cfg
new file mode 100644
index 0000000..a3c514e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/ebtables.cfg
@@ -0,0 +1,2 @@
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_T_NAT=m
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/ebtables.scc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/ebtables.scc
new file mode 100644
index 0000000..b3895e5
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/ebtables.scc
@@ -0,0 +1,4 @@
+define KFEATURE_DESCRIPTION "Enable ebtables support"
+define KFEATURE_COMPATIBILITY board
+
+kconf non-hardware ebtables.cfg
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
new file mode 100644
index 0000000..035b314
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.cfg
@@ -0,0 +1,21 @@
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_MEM_RES_CTLR=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_USER_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+
+CONFIG_CLS_CGROUP=m
+CONFIG_BLK_CGROUP=m
+CONFIG_NETPRIO_CGROUP=m
+
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.scc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.scc
new file mode 100644
index 0000000..ee51883
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/lxc.scc
@@ -0,0 +1,4 @@
+define KFEATURE_DESCRIPTION "Enable features needed by LXC: namespaces, cgroups, etc."
+define KFEATURE_COMPATIBILITY board
+
+kconf non-hardware lxc.cfg
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/vswitch.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/vswitch.cfg
new file mode 100644
index 0000000..0067504
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/vswitch.cfg
@@ -0,0 +1,2 @@
+CONFIG_OPENVSWITCH=m
+CONFIG_NET_SCH_INGRESS=m
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/vswitch.scc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/vswitch.scc
new file mode 100644
index 0000000..2790b4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/vswitch.scc
@@ -0,0 +1,4 @@
+define KFEATURE_DESCRIPTION "Enable in kernel OpenvSwitch module"
+define KFEATURE_COMPATIBILITY board
+
+kconf non-hardware vswitch.cfg
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xen.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xen.cfg
new file mode 100644
index 0000000..4780311
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xen.cfg
@@ -0,0 +1,50 @@
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_XEN=y
+CONFIG_XEN_DOM0=y
+CONFIG_XEN_PVHVM=y
+CONFIG_XEN_MAX_DOMAIN_MEMORY=500
+CONFIG_XEN_SAVE_RESTORE=y
+# CONFIG_XEN_DEBUG_FS is not set
+CONFIG_XEN_PVH=y
+CONFIG_MMU_NOTIFIER=y
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_PCI_XEN=y
+CONFIG_XEN_PCIDEV_FRONTEND=y
+CONFIG_SYS_HYPERVISOR=y
+CONFIG_XEN_BLKDEV_FRONTEND=y
+CONFIG_XEN_BLKDEV_BACKEND=m
+CONFIG_XEN_SCSI_FRONTEND=m
+CONFIG_XEN_NETDEV_FRONTEND=y
+CONFIG_XEN_NETDEV_BACKEND=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+CONFIG_XEN_WDT=m
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_XEN_FBDEV_FRONTEND=y
+CONFIG_XEN_BALLOON=y
+CONFIG_XEN_SCRUB_PAGES=y
+CONFIG_XEN_DEV_EVTCHN=y
+CONFIG_XEN_BACKEND=y
+CONFIG_XENFS=y
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+CONFIG_XEN_GNTDEV=m
+CONFIG_XEN_GRANT_DEV_ALLOC=m
+CONFIG_SWIOTLB_XEN=y
+CONFIG_XEN_PCIDEV_BACKEND=m
+CONFIG_XEN_PRIVCMD=y
+CONFIG_XEN_ACPI_PROCESSOR=m
+CONFIG_XEN_MCE_LOG=y
+CONFIG_XEN_HAVE_PVMMU=y
+CONFIG_XEN_EFI=y
+CONFIG_XEN_AUTO_XLATE=y
+CONFIG_XEN_ACPI=y
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xen.scc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xen.scc
new file mode 100644
index 0000000..b588e5d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xen.scc
@@ -0,0 +1,4 @@
+define KFEATURE_DESCRIPTION "Xen Kernel Support"
+define KFEATURE_COMPATIBILITY arch
+
+kconf non-hardware xen.cfg
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xt-checksum.cfg b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xt-checksum.cfg
new file mode 100644
index 0000000..58afbff
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xt-checksum.cfg
@@ -0,0 +1 @@
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xt-checksum.scc b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xt-checksum.scc
new file mode 100644
index 0000000..d3804f0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto/xt-checksum.scc
@@ -0,0 +1,4 @@
+define KFEATURE_DESCRIPTION "Add extra iptables modules"
+define KFEATURE_COMPATIBILITY board
+
+kconf non-hardware xt-checksum.cfg
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
new file mode 100644
index 0000000..85e98cc
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.1.bbappend
@@ -0,0 +1,19 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI += "file://xt-checksum.scc \
+ file://ebtables.scc \
+ file://vswitch.scc \
+ file://lxc.scc \
+ "
+KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
+
+KERNEL_MODULE_AUTOLOAD += "openvswitch"
+KERNEL_MODULE_AUTOLOAD += "kvm"
+KERNEL_MODULE_AUTOLOAD += "kvm-amd"
+KERNEL_MODULE_AUTOLOAD += "kvm-intel"
+
+# aufs kernel support required for xen-image-minimal
+KERNEL_FEATURES_append += "${@base_contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
+
+# xen kernel support
+SRC_URI += "${@base_contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
diff --git a/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
new file mode 100644
index 0000000..85e98cc
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-kernel/linux/linux-yocto_4.4.bbappend
@@ -0,0 +1,19 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+SRC_URI += "file://xt-checksum.scc \
+ file://ebtables.scc \
+ file://vswitch.scc \
+ file://lxc.scc \
+ "
+KERNEL_FEATURES_append = " features/kvm/qemu-kvm-enable.scc"
+
+KERNEL_MODULE_AUTOLOAD += "openvswitch"
+KERNEL_MODULE_AUTOLOAD += "kvm"
+KERNEL_MODULE_AUTOLOAD += "kvm-amd"
+KERNEL_MODULE_AUTOLOAD += "kvm-intel"
+
+# aufs kernel support required for xen-image-minimal
+KERNEL_FEATURES_append += "${@base_contains('DISTRO_FEATURES', 'aufs', ' features/aufs/aufs-enable.scc', '', d)}"
+
+# xen kernel support
+SRC_URI += "${@base_contains('DISTRO_FEATURES', 'xen', ' file://xen.scc', '', d)}"
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch
new file mode 100644
index 0000000..0a44b85
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/configure-Only-link-against-libpcap-on-FreeBSD.patch
@@ -0,0 +1,70 @@
+From d30e714ccb9d13caf39d14d5b2fc9523b678ed51 Mon Sep 17 00:00:00 2001
+From: Ben Pfaff <blp@nicira.com>
+Date: Thu, 14 Mar 2013 15:20:55 -0700
+Subject: [PATCH] configure: Only link against libpcap on FreeBSD.
+
+commit d30e714ccb9d13caf39d14d5b2fc9523b678ed51 upstream
+http://git.openvswitch.org/git/openvswitch
+
+On other platforms there is no benefit to linking against libpcap, because
+it is not used.
+
+Signed-off-by: Ben Pfaff <blp@nicira.com>
+CC: Ed Maste <emaste@freebsd.org>
+---
+ acinclude.m4 | 7 ++++++-
+ configure.ac | 3 +--
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/acinclude.m4 b/acinclude.m4
+index f0610c9..19a47dd 100644
+--- a/acinclude.m4
++++ b/acinclude.m4
+@@ -1,6 +1,6 @@
+ # -*- autoconf -*-
+
+-# Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
++# Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+@@ -295,6 +295,8 @@ AC_DEFUN([OVS_CHECK_IF_PACKET],
+ fi])
+
+ dnl Checks for net/if_dl.h.
++dnl
++dnl (We use this as a proxy for checking whether we're building on FreeBSD.)
+ AC_DEFUN([OVS_CHECK_IF_DL],
+ [AC_CHECK_HEADER([net/if_dl.h],
+ [HAVE_IF_DL=yes],
+@@ -303,6 +305,9 @@ AC_DEFUN([OVS_CHECK_IF_DL],
+ if test "$HAVE_IF_DL" = yes; then
+ AC_DEFINE([HAVE_IF_DL], [1],
+ [Define to 1 if net/if_dl.h is available.])
++
++ # On FreeBSD we use libpcap to access network devices.
++ AC_SEARCH_LIBS([pcap_open_live], [pcap])
+ fi])
+
+ dnl Checks for buggy strtok_r.
+diff --git a/configure.ac b/configure.ac
+index 1cacd29..bd49179 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1,4 +1,4 @@
+-# Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
++# Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+@@ -44,7 +44,6 @@ AC_SYS_LARGEFILE
+ AC_SEARCH_LIBS([pow], [m])
+ AC_SEARCH_LIBS([clock_gettime], [rt])
+ AC_SEARCH_LIBS([timer_create], [rt])
+-AC_SEARCH_LIBS([pcap_open_live], [pcap])
+
+ OVS_CHECK_ESX
+ OVS_CHECK_COVERAGE
+--
+1.8.3.2
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-more-target-python-substitutions.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-more-target-python-substitutions.patch
new file mode 100644
index 0000000..2b87a11
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-more-target-python-substitutions.patch
@@ -0,0 +1,50 @@
+From d1ab2f62a03c2c977de6fed5fca8de63e328a870 Mon Sep 17 00:00:00 2001
+Message-Id: <d1ab2f62a03c2c977de6fed5fca8de63e328a870.1391527986.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Tue, 4 Feb 2014 15:30:41 +0000
+Subject: [PATCH 1/1] openvswitch: add more target python substitutions
+
+The TARGET_PYTHON variable is used for script
+substitutions to ensure the scripts will be able to properly execute
+if the target system has a different path for the python.
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ ovsdb/ovsdb-dot.in | 2 +-
+ ovsdb/ovsdb-idlc.in | 2 +-
+ utilities/ovs-dpctl-top.in | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/ovsdb/ovsdb-dot.in b/ovsdb/ovsdb-dot.in
+index 85c126d..402a77c 100755
+--- a/ovsdb/ovsdb-dot.in
++++ b/ovsdb/ovsdb-dot.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+
+ from datetime import date
+ import ovs.db.error
+diff --git a/ovsdb/ovsdb-idlc.in b/ovsdb/ovsdb-idlc.in
+index ec1c655..f5c135f 100755
+--- a/ovsdb/ovsdb-idlc.in
++++ b/ovsdb/ovsdb-idlc.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+
+ import getopt
+ import os
+diff --git a/utilities/ovs-dpctl-top.in b/utilities/ovs-dpctl-top.in
+index f43fdeb..8475118 100755
+--- a/utilities/ovs-dpctl-top.in
++++ b/utilities/ovs-dpctl-top.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Copyright (c) 2013 Nicira, Inc.
+ #
+--
+1.8.3.4
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-ptest.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-ptest.patch
new file mode 100644
index 0000000..cb708de
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-ptest.patch
@@ -0,0 +1,68 @@
+Add test-install rule to support ptest execution
+
+Signed-off-by: Radu Patriu <radu.patriu@enea.com>
+
+Index: openvswitch-2.1.2/Makefile.am
+===================================================================
+--- openvswitch-2.1.2.orig/Makefile.am
++++ openvswitch-2.1.2/Makefile.am
+@@ -300,3 +300,5 @@
+ include python/compat/automake.mk
+ include tutorial/automake.mk
+ include vtep/automake.mk
++include test.mk
++
+Index: openvswitch-2.1.2/test.mk
+===================================================================
+--- /dev/null
++++ openvswitch-2.1.2/test.mk
+@@ -0,0 +1,49 @@
++TEST_DEST ?= ${prefix}/lib/openvswitch
++TEST_ROOT ?= ${prefix}/lib/openvswitch
++TEST_DEPEND =
++
++if HAVE_OPENSSL
++TEST_DEPEND += $(TESTPKI_FILES)
++endif
++
++test-install: $(TEST_DEPEND)
++ @list='$(noinst_PROGRAMS) $(EXTRA_DIST) $(dist_check_SCRIPTS) $(TEST_DEPEND) tests/atlocal tests/atconfig' ;\
++ install -d $(TEST_DEST)/tests ;\
++ install -d $(TEST_DEST)/python ;\
++ install -d $(TEST_DEST)/python/ovs ;\
++ install -d $(TEST_DEST)/python/ovs/db ;\
++ install -d $(TEST_DEST)/python/ovs/unixctl ;\
++ install -d $(TEST_DEST)/vswitchd ;\
++ install vswitchd/vswitch.ovsschema $(TEST_DEST)/vswitchd ;\
++ install -d $(TEST_DEST)/debian ;\
++ install debian/ovs-monitor-ipsec $(TEST_DEST)/debian ;\
++ install -d $(TEST_DEST)/build-aux ;\
++ install build-aux/check-structs $(TEST_DEST)/build-aux ;\
++ install -d $(TEST_DEST)/xenserver ;\
++ install xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync $(TEST_DEST)/xenserver ;\
++ install xenserver/opt_xensource_libexec_interface-reconfigure $(TEST_DEST)/xenserver ;\
++ install xenserver/opt_xensource_libexec_InterfaceReconfigure.py $(TEST_DEST)/xenserver ;\
++ install xenserver/opt_xensource_libexec_InterfaceReconfigureBridge.py $(TEST_DEST)/xenserver ;\
++ install xenserver/opt_xensource_libexec_InterfaceReconfigureVswitch.py $(TEST_DEST)/xenserver ;\
++ install -d $(TEST_DEST)/vtep ;\
++ install vtep/vtep.ovsschema $(TEST_DEST)/vtep ;\
++ for p in $$list ; do \
++ echo $$p ;\
++ p=$${p#./} ;\
++ pre=$${p#tests\/} ;\
++ if test $$pre != $$p ; then \
++ echo installing $$p to $(TEST_DEST)/tests/$$pre ;\
++ install $$p $(TEST_DEST)/tests/$$pre ;\
++ continue ;\
++ fi ;\
++ pre=$${p#python\/ovs\/} ;\
++ if test $$pre != $$p ; then \
++ echo installing $$p to $(TEST_DEST)/python/ovs/$$pre ;\
++ install $$p $(TEST_DEST)/python/ovs/$$pre ;\
++ continue ;\
++ fi; \
++ done ;\
++ sed -i 's|abs_builddir=.*|abs_builddir='"'"'$(TEST_ROOT)/tests'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|abs_srcdir=.*|abs_srcdir='"'"'$(TEST_ROOT)/tests'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|abs_top_srcdir=.*|abs_top_srcdir='"'"'$(TEST_ROOT)'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|abs_top_builddir=.*|abs_top_builddir='"'"'$(TEST_ROOT)'"'"'|g' $(TEST_DEST)/tests/atconfig
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-target-perl-handling.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-target-perl-handling.patch
new file mode 100644
index 0000000..98224b4
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-target-perl-handling.patch
@@ -0,0 +1,48 @@
+From 569ac1066cd3046b8ac899153df9f07908d45145 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Fri, 21 Jun 2013 11:16:00 -0400
+Subject: [PATCH] openvswitch: add target perl handling
+
+Allow the build to specify a path for the perl instead of reusing
+the PERL variable which can lead to inconsistencies if we are cross
+compiling. The TARGET_PERL variable will be used for script
+substitutions to ensure the scripts will be able to properly execute
+if the target system has a different path for the perl.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+ Makefile.am | 1 +
+ configure.ac | 7 +++++++
+ utilities/ovs-parse-leaks.in | 2 +-
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+Index: openvswitch-2.0.0/Makefile.am
+===================================================================
+--- openvswitch-2.0.0.orig/Makefile.am
++++ openvswitch-2.0.0/Makefile.am
+@@ -114,6 +114,7 @@
+ -e 's,[@]LOGDIR[@],$(LOGDIR),g' \
+ -e 's,[@]DBDIR[@],$(DBDIR),g' \
+ -e 's,[@]PERL[@],$(PERL),g' \
++ -e 's,[@]TARGET_PERL[@],$(TARGET_PERL),g' \
+ -e 's,[@]PYTHON[@],$(PYTHON),g' \
+ -e 's,[@]TARGET_PYTHON[@],$(TARGET_PYTHON),g' \
+ -e 's,[@]RUNDIR[@],$(RUNDIR),g' \
+Index: openvswitch-2.0.0/configure.ac
+===================================================================
+--- openvswitch-2.0.0.orig/configure.ac
++++ openvswitch-2.0.0/configure.ac
+@@ -115,6 +115,13 @@
+ AC_SUBST(KARCH)
+ OVS_CHECK_LINUX
+
++if test "$TARGET_PERL"; then
++ TARGET_PERL=$TARGET_PERL
++else
++ TARGET_PERL=$PERL
++fi
++AC_SUBST(TARGET_PERL)
++
+ if test "$TARGET_PYTHON"; then
+ TARGET_PYTHON=$TARGET_PYTHON
+ else
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-target-python-handling.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-target-python-handling.patch
new file mode 100644
index 0000000..97b352c
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-add-target-python-handling.patch
@@ -0,0 +1,136 @@
+From e8a5d34885c5fdba7d951fb1bb85131cbafca432 Mon Sep 17 00:00:00 2001
+From: Mark Asselstine <mark.asselstine@windriver.com>
+Date: Wed, 19 Jun 2013 17:25:56 -0400
+Subject: [PATCH] openvswitch: add target python handling
+
+Allow the build to specify a path for the python instead of reusing
+the PYTHON variable which can lead to inconsistencies if we are cross
+compiling. The TARGET_PYTHON variable will be used for script
+substitutions to ensure the scripts will be able to properly execute
+if the target system has a different path for the python.
+
+Signed-off-by: Mark Asselstine <mark.asselstine@windriver.com>
+---
+ Makefile.am | 1 +
+ configure.ac | 7 +++++++
+ ovsdb/ovsdbmonitor/ovsdbmonitor.in | 2 +-
+ utilities/bugtool/ovs-bugtool.in | 2 +-
+ utilities/ovs-check-dead-ifs.in | 2 +-
+ utilities/ovs-l3ping.in | 2 +-
+ utilities/ovs-parse-backtrace.in | 2 +-
+ utilities/ovs-pcap.in | 2 +-
+ utilities/ovs-tcpundump.in | 2 +-
+ utilities/ovs-test.in | 2 +-
+ utilities/ovs-vlan-test.in | 2 +-
+ 11 files changed, 17 insertions(+), 9 deletions(-)
+
+Index: openvswitch-2.1.2/Makefile.am
+===================================================================
+--- openvswitch-2.1.2.orig/Makefile.am
++++ openvswitch-2.1.2/Makefile.am
+@@ -125,6 +125,7 @@
+ -e 's,[@]DBDIR[@],$(DBDIR),g' \
+ -e 's,[@]PERL[@],$(PERL),g' \
+ -e 's,[@]PYTHON[@],$(PYTHON),g' \
++ -e 's,[@]TARGET_PYTHON[@],$(TARGET_PYTHON),g' \
+ -e 's,[@]RUNDIR[@],$(RUNDIR),g' \
+ -e 's,[@]VERSION[@],$(VERSION),g' \
+ -e 's,[@]localstatedir[@],$(localstatedir),g' \
+Index: openvswitch-2.1.2/configure.ac
+===================================================================
+--- openvswitch-2.1.2.orig/configure.ac
++++ openvswitch-2.1.2/configure.ac
+@@ -118,6 +118,13 @@
+ AC_SUBST(KARCH)
+ OVS_CHECK_LINUX
+
++if test "$TARGET_PYTHON"; then
++ TARGET_PYTHON=$TARGET_PYTHON
++else
++ TARGET_PYTHON=$PYTHON
++fi
++AC_SUBST(TARGET_PYTHON)
++
+ AC_CONFIG_FILES(Makefile)
+ AC_CONFIG_FILES(datapath/Makefile)
+ AC_CONFIG_FILES(datapath/linux/Kbuild)
+Index: openvswitch-2.1.2/utilities/bugtool/ovs-bugtool.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/bugtool/ovs-bugtool.in
++++ openvswitch-2.1.2/utilities/bugtool/ovs-bugtool.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+
+ # This library is free software; you can redistribute it and/or
+ # modify it under the terms of version 2.1 of the GNU Lesser General Public
+Index: openvswitch-2.1.2/utilities/ovs-check-dead-ifs.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-check-dead-ifs.in
++++ openvswitch-2.1.2/utilities/ovs-check-dead-ifs.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+
+ import os
+ import re
+Index: openvswitch-2.1.2/utilities/ovs-l3ping.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-l3ping.in
++++ openvswitch-2.1.2/utilities/ovs-l3ping.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+Index: openvswitch-2.1.2/utilities/ovs-parse-backtrace.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-parse-backtrace.in
++++ openvswitch-2.1.2/utilities/ovs-parse-backtrace.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Copyright (c) 2012 Nicira, Inc.
+ #
+Index: openvswitch-2.1.2/utilities/ovs-pcap.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-pcap.in
++++ openvswitch-2.1.2/utilities/ovs-pcap.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Copyright (c) 2010 Nicira, Inc.
+ #
+Index: openvswitch-2.1.2/utilities/ovs-tcpundump.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-tcpundump.in
++++ openvswitch-2.1.2/utilities/ovs-tcpundump.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Copyright (c) 2010 Nicira, Inc.
+ #
+Index: openvswitch-2.1.2/utilities/ovs-test.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-test.in
++++ openvswitch-2.1.2/utilities/ovs-test.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+Index: openvswitch-2.1.2/utilities/ovs-vlan-test.in
+===================================================================
+--- openvswitch-2.1.2.orig/utilities/ovs-vlan-test.in
++++ openvswitch-2.1.2/utilities/ovs-vlan-test.in
+@@ -1,4 +1,4 @@
+-#! @PYTHON@
++#! @TARGET_PYTHON@
+ #
+ # Copyright (c) 2010 Nicira, Inc.
+ #
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-example b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-example
new file mode 100644
index 0000000..6f08c3f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-example
@@ -0,0 +1,102 @@
+#! /bin/sh
+#
+# Copyright (C) 2011 Nicira Networks, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: openvswitch-switch
+# Required-Start: $network $named $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Open vSwitch switch
+### END INIT INFO
+
+(test -x /usr/sbin/ovs-vswitchd && test -x /usr/sbin/ovsdb-server) || exit 0
+
+. /usr/share/openvswitch/scripts/ovs-lib
+test -e /etc/default/openvswitch-switch && . /etc/default/openvswitch-switch
+
+if test X"$BRCOMPAT" = Xyes && test ! -x /usr/sbin/ovs-brcompatd; then
+ BRCOMPAT=no
+ log_warning_msg "ovs-brcompatd missing, disabling bridge compatibility"
+fi
+
+ovs_ctl () {
+ set /usr/share/openvswitch/scripts/ovs-ctl "$@"
+ if test X"$BRCOMPAT" = Xyes; then
+ set "$@" --brcompat
+ fi
+ "$@"
+}
+
+load_kmod () {
+ ovs_ctl load-kmod || exit $?
+}
+
+start () {
+ if ovs_ctl load-kmod; then
+ :
+ else
+ echo "Module has probably not been built for this kernel."
+ if ! test -d /usr/share/doc/openvswitch-datapath-source; then
+ echo "Install the openvswitch-datapath-source package, then read"
+ else
+ echo "For instructions, read"
+ fi
+ echo "/usr/share/doc/openvswitch-datapath-source/README.Debian"
+ fi
+ set ovs_ctl ${1-start} --system-id=random
+ if test X"$FORCE_COREFILES" != X; then
+ set "$@" --force-corefiles="$FORCE_COREFILES"
+ fi
+ "$@" || exit $?
+
+ ovs_ctl --protocol=gre enable-protocol
+}
+
+stop () {
+ ovs_ctl stop
+}
+
+case $1 in
+ start)
+ start
+ ;;
+ stop | force-stop)
+ stop
+ ;;
+ reload | force-reload)
+ # The OVS daemons keep up-to-date.
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ status)
+ ovs_ctl status
+ ;;
+ force-reload-kmod)
+ start force-reload-kmod
+ ;;
+ load-kmod)
+ load_kmod
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|force-reload|status|force-stop|force-reload-kmod|load-kmod}" >&2
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-switch b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-switch
new file mode 100644
index 0000000..6f08c3f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-switch
@@ -0,0 +1,102 @@
+#! /bin/sh
+#
+# Copyright (C) 2011 Nicira Networks, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+### BEGIN INIT INFO
+# Provides: openvswitch-switch
+# Required-Start: $network $named $remote_fs $syslog
+# Required-Stop: $remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Open vSwitch switch
+### END INIT INFO
+
+(test -x /usr/sbin/ovs-vswitchd && test -x /usr/sbin/ovsdb-server) || exit 0
+
+. /usr/share/openvswitch/scripts/ovs-lib
+test -e /etc/default/openvswitch-switch && . /etc/default/openvswitch-switch
+
+if test X"$BRCOMPAT" = Xyes && test ! -x /usr/sbin/ovs-brcompatd; then
+ BRCOMPAT=no
+ log_warning_msg "ovs-brcompatd missing, disabling bridge compatibility"
+fi
+
+ovs_ctl () {
+ set /usr/share/openvswitch/scripts/ovs-ctl "$@"
+ if test X"$BRCOMPAT" = Xyes; then
+ set "$@" --brcompat
+ fi
+ "$@"
+}
+
+load_kmod () {
+ ovs_ctl load-kmod || exit $?
+}
+
+start () {
+ if ovs_ctl load-kmod; then
+ :
+ else
+ echo "Module has probably not been built for this kernel."
+ if ! test -d /usr/share/doc/openvswitch-datapath-source; then
+ echo "Install the openvswitch-datapath-source package, then read"
+ else
+ echo "For instructions, read"
+ fi
+ echo "/usr/share/doc/openvswitch-datapath-source/README.Debian"
+ fi
+ set ovs_ctl ${1-start} --system-id=random
+ if test X"$FORCE_COREFILES" != X; then
+ set "$@" --force-corefiles="$FORCE_COREFILES"
+ fi
+ "$@" || exit $?
+
+ ovs_ctl --protocol=gre enable-protocol
+}
+
+stop () {
+ ovs_ctl stop
+}
+
+case $1 in
+ start)
+ start
+ ;;
+ stop | force-stop)
+ stop
+ ;;
+ reload | force-reload)
+ # The OVS daemons keep up-to-date.
+ ;;
+ restart)
+ stop
+ start
+ ;;
+ status)
+ ovs_ctl status
+ ;;
+ force-reload-kmod)
+ start force-reload-kmod
+ ;;
+ load-kmod)
+ load_kmod
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|restart|force-reload|status|force-stop|force-reload-kmod|load-kmod}" >&2
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-switch-setup b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-switch-setup
new file mode 100644
index 0000000..73387fb
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-switch-setup
@@ -0,0 +1,8 @@
+# This is a POSIX shell fragment -*- sh -*-
+
+# FORCE_COREFILES: If 'yes' then core files will be enabled.
+# FORCE_COREFILES=yes
+
+# BRCOMPAT: If 'yes' and the openvswitch-brcompat package is installed, then
+# Linux bridge compatibility will be enabled.
+# BRCOMPAT=yes
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-testcontroller b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-testcontroller
new file mode 100755
index 0000000..aad5ad6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-testcontroller
@@ -0,0 +1,274 @@
+#!/bin/sh
+#
+# Copyright (c) 2011 Nicira Networks Inc.
+# Copyright (c) 2007, 2009 Javier Fernandez-Sanguino <jfs@debian.org>
+#
+# This is free software; you may redistribute it and/or modify
+# it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2,
+# or (at your option) any later version.
+#
+# This is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License with
+# the Debian operating system, in /usr/share/common-licenses/GPL; if
+# not, write to the Free Software Foundation, Inc., 59 Temple Place,
+# Suite 330, Boston, MA 02111-1307 USA
+#
+### BEGIN INIT INFO
+# Provides: openvswitch-testcontroller
+# Required-Start: $network $local_fs $remote_fs
+# Required-Stop: $remote_fs
+# Should-Start: $named
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Open vSwitch controller
+### END INIT INFO
+
+PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
+DAEMON=/usr/bin/ovs-testcontroller # Introduce the server's location here
+NAME=ovs-testcontroller # Introduce the short server's name here
+DESC=ovs-testcontroller # Introduce a short description here
+LOGDIR=/var/log/openvswitch # Log directory to use
+
+PIDFILE=/var/run/openvswitch/$NAME.pid
+
+test -x $DAEMON || exit 0
+
+. /lib/lsb/init-functions
+
+# Default options, these can be overridden by the information
+# at /etc/default/openvswitch-testcontroller
+DAEMON_OPTS="" # Additional options given to the server
+
+DODTIME=10 # Time to wait for the server to die, in seconds
+ # If this value is set too low you might not
+ # let some servers to die gracefully and
+ # 'restart' will not work
+
+LOGFILE=$LOGDIR/$NAME.log # Server logfile
+#DAEMONUSER= # User to run the daemons as. If this value
+ # is set start-stop-daemon will chuid the server
+
+# Include defaults if available
+default=/etc/default/openvswitch-testcontroller
+if [ -f $default ] ; then
+ . $default
+fi
+
+# Check that the user exists (if we set a user)
+# Does the user exist?
+if [ -n "$DAEMONUSER" ] ; then
+ if getent passwd | grep -q "^$DAEMONUSER:"; then
+ # Obtain the uid and gid
+ DAEMONUID=`getent passwd |grep "^$DAEMONUSER:" | awk -F : '{print $3}'`
+ DAEMONGID=`getent passwd |grep "^$DAEMONUSER:" | awk -F : '{print $4}'`
+ else
+ log_failure_msg "The user $DAEMONUSER, required to run $NAME does not exist."
+ exit 1
+ fi
+fi
+
+
+set -e
+
+running_pid() {
+# Check if a given process pid's cmdline matches a given name
+ pid=$1
+ name=$2
+ [ -z "$pid" ] && return 1
+ [ ! -d /proc/$pid ] && return 1
+ cmd=`cat /proc/$pid/cmdline | tr "\000" "\n"|head -n 1 |cut -d : -f 1`
+ # Is this the expected server
+ [ "$cmd" != "$name" ] && return 1
+ return 0
+}
+
+running() {
+# Check if the process is running looking at /proc
+# (works for all users)
+
+ # No pidfile, probably no daemon present
+ [ ! -f "$PIDFILE" ] && return 1
+ pid=`cat $PIDFILE`
+ running_pid $pid $DAEMON || return 1
+ return 0
+}
+
+start_server() {
+ if [ -z "$LISTEN" ]; then
+ echo "$default: No connection methods configured, controller disabled" >&2
+ exit 0
+ fi
+
+ if [ ! -d /var/run/openvswitch ]; then
+ install -d -m 755 -o root -g root /var/run/openvswitch
+ fi
+
+ SSL_OPTS=
+ case $LISTEN in
+ *ssl*)
+ : ${PRIVKEY:=/etc/openvswitch-testcontroller/privkey.pem}
+ : ${CERT:=/etc/openvswitch-testcontroller/cert.pem}
+ : ${CACERT:=/etc/openvswitch-testcontroller/cacert.pem}
+ if test ! -e "$PRIVKEY" || test ! -e "$CERT" ||
+ test ! -e "$CACERT"; then
+ if test ! -e "$PRIVKEY"; then
+ echo "$PRIVKEY: private key missing" >&2
+ fi
+ if test ! -e "$CERT"; then
+ echo "$CERT: certificate for private key missing" >&2
+ fi
+ if test ! -e "$CACERT"; then
+ echo "$CACERT: CA certificate missing" >&2
+ fi
+ exit 1
+ fi
+ SSL_OPTS="--private-key=$PRIVKEY --certificate=$CERT --ca-cert=$CACERT"
+ ;;
+ esac
+
+# Start the process using the wrapper
+ if [ -z "$DAEMONUSER" ] ; then
+ start-stop-daemon --start --pidfile $PIDFILE \
+ --exec $DAEMON -- --detach --pidfile=$PIDFILE \
+ $LISTEN $DAEMON_OPTS $SSL_OPTS
+ errcode=$?
+ else
+# if we are using a daemonuser then change the user id
+ start-stop-daemon --start --quiet --pidfile $PIDFILE \
+ --chuid $DAEMONUSER --exec $DAEMON -- \
+ --detach --pidfile=$PIDFILE $LISTEN $DAEMON_OPTS \
+ $SSL_OPTS
+ errcode=$?
+ fi
+ return $errcode
+}
+
+stop_server() {
+# Stop the process using the wrapper
+ if [ -z "$DAEMONUSER" ] ; then
+ start-stop-daemon --stop --quiet --pidfile $PIDFILE \
+ --exec $DAEMON
+ errcode=$?
+ else
+# if we are using a daemonuser then look for process that match
+ start-stop-daemon --stop --quiet --pidfile $PIDFILE \
+ --user $DAEMONUSER --exec $DAEMON
+ errcode=$?
+ fi
+
+ return $errcode
+}
+
+reload_server() {
+ [ ! -f "$PIDFILE" ] && return 1
+ pid=`cat $PIDFILE` # This is the daemon's pid
+ # Send a SIGHUP
+ kill -1 $pid
+ return $?
+}
+
+force_stop() {
+# Force the process to die killing it manually
+ [ ! -e "$PIDFILE" ] && return
+ if running ; then
+ kill -15 $pid
+ # Is it really dead?
+ sleep "$DODTIME"
+ if running ; then
+ kill -9 $pid
+ sleep "$DODTIME"
+ if running ; then
+ echo "Cannot kill $NAME (pid=$pid)!"
+ exit 1
+ fi
+ fi
+ fi
+ rm -f $PIDFILE
+}
+
+
+case "$1" in
+ start)
+ log_begin_msg "Starting $DESC " "$NAME"
+ # Check if it's running first
+ if running ; then
+ log_warning_msg "apparently already running"
+ log_end_msg 0
+ exit 0
+ fi
+ if start_server && running ; then
+ # It's ok, the server started and is running
+ log_end_msg 0
+ else
+ # Either we could not start it or it is not running
+ # after we did
+ # NOTE: Some servers might die some time after they start,
+ # this code does not try to detect this and might give
+ # a false positive (use 'status' for that)
+ log_end_msg 1
+ fi
+ ;;
+ stop)
+ log_begin_msg "Stopping $DESC" "$NAME"
+ if running ; then
+ # Only stop the server if we see it running
+ stop_server
+ log_end_msg $?
+ else
+ # If it's not running don't do anything
+ log_warning_msg "apparently not running"
+ log_end_msg 0
+ exit 0
+ fi
+ ;;
+ force-stop)
+ # First try to stop gracefully the program
+ $0 stop
+ if running; then
+ # If it's still running try to kill it more forcefully
+ log_begin_msg "Stopping (force) $DESC" "$NAME"
+ force_stop
+ log_end_msg $?
+ fi
+ ;;
+ restart|force-reload)
+ log_begin_msg "Restarting $DESC" "$NAME"
+ stop_server
+ # Wait some sensible amount, some server need this
+ [ -n "$DODTIME" ] && sleep $DODTIME
+ start_server
+ running
+ log_end_msg $?
+ ;;
+ status)
+
+ log_begin_msg "Checking status of $DESC" "$NAME"
+ if running ; then
+ log_begin_msg "running"
+ log_end_msg 0
+ else
+ log_warning_msg "apparently not running"
+ log_end_msg 1
+ exit 1
+ fi
+ ;;
+ # Use this if the daemon cannot reload
+ reload)
+ log_warning_msg "Reloading $NAME daemon: not implemented, as the daemon"
+ log_warning_msg "cannot re-read the config file (use restart)."
+ ;;
+ *)
+ N=/etc/init.d/openvswitch-testcontroller
+ echo "Usage: $N {start|stop|force-stop|restart|force-reload|status}" >&2
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-testcontroller-setup b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-testcontroller-setup
new file mode 100644
index 0000000..b431ece
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/openvswitch-testcontroller-setup
@@ -0,0 +1,29 @@
+# This is a POSIX shell fragment -*- sh -*-
+
+# LISTEN: What OpenFlow connection methods should the controller listen on?
+#
+# This is a space-delimited list of connection methods:
+#
+# * "pssl:[PORT]": Listen for SSL connections on the specified PORT
+# (default: 6633). The private key, certificate, and CA certificate
+# must be specified below.
+#
+# * "ptcp:[PORT]": Listen for TCP connections on the specified PORT
+# (default: 6633). Not recommended for security reasons.
+#
+LISTEN="pssl:"
+
+# PRIVKEY: Name of file containing controller's private key.
+# Required if SSL enabled.
+PRIVKEY=/etc/openvswitch-testcontroller/privkey.pem
+
+# CERT: Name of file containing certificate for private key.
+# Required if SSL enabled.
+CERT=/etc/openvswitch-testcontroller/cert.pem
+
+# CACERT: Name of file containing switch CA certificate.
+# Required if SSL enabled.
+CACERT=/etc/openvswitch-testcontroller/cacert.pem
+
+# Additional options to pass to controller, e.g. "--hub"
+DAEMON_OPTS=""
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/files/run-ptest b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/run-ptest
new file mode 100644
index 0000000..dd06707
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/files/run-ptest
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+./tests/testsuite --am-fmt -C tests AUTOTEST_PATH=utilities:vswitchd:ovsdb:tests
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/disable_m4_check.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/disable_m4_check.patch
new file mode 100644
index 0000000..1ad5d6f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/disable_m4_check.patch
@@ -0,0 +1,18 @@
+Disable m4 file test where sources are built from git.
+
+Signed-off-by: Amy Fong <amy.fong@windriver.com>
+---
+ Makefile.am | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -185,7 +185,7 @@
+ # Makefile in datapath/linux, needed to get the list of files to
+ # distribute, requires GNU make extensions.
+ if GNU_MAKE
+-ALL_LOCAL += dist-hook-git
++# ALL_LOCAL += dist-hook-git
+ dist-hook-git: distfiles
+ @if test -e $(srcdir)/.git && (git --version) >/dev/null 2>&1; then \
+ (cd datapath && $(MAKE) distfiles); \
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/kernel_module.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/kernel_module.patch
new file mode 100644
index 0000000..033cfb8
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/kernel_module.patch
@@ -0,0 +1,20 @@
+Specify install path for kernel module
+
+Signed-off-by: Amy Fong <amy.fong@windriver.com>
+---
+ datapath/linux/Makefile.main.in | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/datapath/linux/Makefile.main.in
++++ b/datapath/linux/Makefile.main.in
+@@ -71,8 +71,8 @@
+ $(MAKE) -C $(KSRC) M=$(builddir) modules
+
+ modules_install:
+- $(MAKE) -C $(KSRC) M=$(builddir) modules_install
+- depmod `sed -n 's/#define UTS_RELEASE "\([^"]*\)"/\1/p' $(KSRC)/include/generated/utsrelease.h`
++ $(MAKE) -C $(KSRC) M=$(builddir) modules_install INSTALL_MOD_PATH=${INSTALL_MOD_PATH}
++ # depmod `sed -n 's/#define UTS_RELEASE "\([^"]*\)"/\1/p' $(KSRC)/include/generated/utsrelease.h`
+ endif
+
+ # Much of the kernel build system in this file is derived from Intel's
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-8c0b419a0b9ac0141d6973dcc80306dfc6a83d31.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-8c0b419a0b9ac0141d6973dcc80306dfc6a83d31.patch
new file mode 100644
index 0000000..7e74044
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-add-ptest-8c0b419a0b9ac0141d6973dcc80306dfc6a83d31.patch
@@ -0,0 +1,110 @@
+From e20556d7cf0fc8258db77c1f3e0a160cf9fd4514 Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Wed, 30 Mar 2016 06:23:45 -0400
+Subject: [PATCH] openvswitch: Add test-install rule to support ptest execution
+
+Signed-off-by: Radu Patriu <radu.patriu@enea.com>
+
+Fix ptest for v2.5
+ - Copy certain files from srcdir since it has been different from
+ build directory.
+ - Copy more necessary files for new added test cases.
+ - Modify config files for running on target.
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+---
+ Makefile.am | 1 +
+ test.mk | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 73 insertions(+)
+ create mode 100644 test.mk
+
+diff --git a/Makefile.am b/Makefile.am
+index ed43c2f..f38d278 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -401,3 +401,4 @@ include datapath-windows/include/automake.mk
+ include windows/automake.mk
+ include ovn/automake.mk
+ include selinux/automake.mk
++include test.mk
+diff --git a/test.mk b/test.mk
+new file mode 100644
+index 0000000..b10a581
+--- /dev/null
++++ b/test.mk
+@@ -0,0 +1,72 @@
++TEST_DEST ?= ${prefix}/lib/openvswitch
++TEST_ROOT ?= ${prefix}/lib/openvswitch
++TEST_DEPEND =
++
++if HAVE_OPENSSL
++TEST_DEPEND += $(TESTPKI_FILES)
++endif
++
++test-install: $(TEST_DEPEND)
++ @list='$(noinst_PROGRAMS) $(EXTRA_DIST) $(dist_check_SCRIPTS) $(TEST_DEPEND) tests/atlocal tests/atconfig' ;\
++ install -d $(TEST_DEST)/tests ;\
++ install $(srcdir)/vtep/ovs-vtep $(TEST_DEST)/tests ;\
++ install -d $(TEST_DEST)/python ;\
++ install -d $(TEST_DEST)/python/ovs ;\
++ install -d $(TEST_DEST)/python/ovs/db ;\
++ install -d $(TEST_DEST)/python/ovs/unixctl ;\
++ install -d $(TEST_DEST)/vswitchd ;\
++ install $(srcdir)/vswitchd/vswitch.ovsschema $(TEST_DEST)/vswitchd ;\
++ install vswitchd/ovs-vswitchd $(TEST_DEST)/vswitchd ;\
++ install -d $(TEST_DEST)/debian ;\
++ install $(srcdir)/debian/ovs-monitor-ipsec $(TEST_DEST)/debian ;\
++ install -d $(TEST_DEST)/build-aux ;\
++ install $(srcdir)/build-aux/check-structs $(TEST_DEST)/build-aux ;\
++ install -d $(TEST_DEST)/xenserver ;\
++ install $(srcdir)/xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync $(TEST_DEST)/xenserver ;\
++ install $(srcdir)/xenserver/opt_xensource_libexec_interface-reconfigure $(TEST_DEST)/xenserver ;\
++ install $(srcdir)/xenserver/opt_xensource_libexec_InterfaceReconfigure.py $(TEST_DEST)/xenserver ;\
++ install $(srcdir)/xenserver/opt_xensource_libexec_InterfaceReconfigureBridge.py $(TEST_DEST)/xenserver ;\
++ install $(srcdir)/xenserver/opt_xensource_libexec_InterfaceReconfigureVswitch.py $(TEST_DEST)/xenserver ;\
++ install -d $(TEST_DEST)/vtep ;\
++ install $(srcdir)/vtep/vtep.ovsschema $(TEST_DEST)/vtep ;\
++ install -d $(TEST_DEST)/ovn ;\
++ install $(srcdir)/ovn/ovn-nb.ovsschema $(TEST_DEST)/ovn ;\
++ install $(srcdir)/ovn/ovn-sb.ovsschema $(TEST_DEST)/ovn ;\
++ install -d $(TEST_DEST)/utilities ;\
++ install $(srcdir)/utilities/ovs-pcap.in $(TEST_DEST)/utilities ;\
++ install $(srcdir)/utilities/ovs-pki.in $(TEST_DEST)/utilities ;\
++ for p in $$list ; do \
++ echo $$p ;\
++ p=$${p#../git/} ;\
++ pre=$${p#tests\/} ;\
++ if test $$pre != $$p ; then \
++ echo installing $$p to $(TEST_DEST)/tests/$$pre ;\
++ if test -f $$p ; then \
++ install $$p $(TEST_DEST)/tests/$$pre ;\
++ else \
++ install $(srcdir)/$$p $(TEST_DEST)/tests/$$pre ;\
++ fi ;\
++ continue ;\
++ fi ;\
++ pre=$${p#python\/ovs\/} ;\
++ if test $$pre != $$p ; then \
++ echo installing $$p to $(TEST_DEST)/python/ovs/$$pre ;\
++ if test -f $$p ; then \
++ install $$p $(TEST_DEST)/python/ovs/$$pre ;\
++ else \
++ install $(srcdir)/$$p $(TEST_DEST)/python/ovs/$$pre ;\
++ fi ;\
++ continue ;\
++ fi; \
++ done ;\
++ sed -i 's|abs_builddir=.*|abs_builddir='"'"'$(TEST_ROOT)/tests'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|abs_srcdir=.*|abs_srcdir='"'"'$(TEST_ROOT)/tests'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|abs_top_srcdir=.*|abs_top_srcdir='"'"'$(TEST_ROOT)'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|abs_top_builddir=.*|abs_top_builddir='"'"'$(TEST_ROOT)'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|at_srcdir=.*|at_srcdir='"'"'.'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|at_top_srcdir=.*|at_top_srcdir='"'"'..'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|at_top_build_prefix=.*|at_top_build_prefix='"'"'../'"'"'|g' $(TEST_DEST)/tests/atconfig
++ sed -i 's|^\(.*config\.log.*\)|#\1|g' $(TEST_DEST)/tests/testsuite
++ sed -i 's|$$srcdir|$$abs_srcdir|g' $(TEST_DEST)/tests/testsuite
++ sed -i 's|ovs-appctl-bashcomp\.bash|/etc/bash_completion.d/ovs-appctl-bashcomp\.bash|g' $(TEST_DEST)/tests/testsuite
++ sed -i 's|ovs-vsctl-bashcomp\.bash|/etc/bash_completion.d/ovs-vsctl-bashcomp\.bash|g' $(TEST_DEST)/tests/testsuite
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-ptest-Fix-python-path.patch b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-ptest-Fix-python-path.patch
new file mode 100644
index 0000000..c75ebc5
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch-git/openvswitch-ptest-Fix-python-path.patch
@@ -0,0 +1,30 @@
+From 2a78bddb865e0d3377f437428ed6825195af0a14 Mon Sep 17 00:00:00 2001
+From: He Zhe <zhe.he@windriver.com>
+Date: Wed, 13 Apr 2016 03:13:01 -0400
+Subject: [PATCH] openvswitch: ptest: Fix python path
+
+For now PYTHON just happens to be the same as TARGET_PYTHON so we get the right
+python binary. This patch sets PYTHON to TARGET_PYTHON since ptest is going to
+be run on target.
+
+Signed-off-by: He Zhe <zhe.he@windriver.com>
+---
+ tests/atlocal.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tests/atlocal.in b/tests/atlocal.in
+index 5815c6c..db2d2c9 100644
+--- a/tests/atlocal.in
++++ b/tests/atlocal.in
+@@ -5,7 +5,7 @@ EGREP='@EGREP@'
+ PERL='@PERL@'
+
+ if test x"$PYTHON" = x; then
+- PYTHON='@PYTHON@'
++ PYTHON='@TARGET_PYTHON@'
+ fi
+
+ PYTHONPATH=$abs_top_srcdir/python:$abs_top_builddir/tests:$PYTHONPATH
+--
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
new file mode 100644
index 0000000..fc515e9
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch.inc
@@ -0,0 +1,141 @@
+SUMMARY = "OpenvSwitch"
+DESCRIPTION = "\
+ Open vSwitch is a production quality, multilayer virtual switch \
+ licensed under the open source Apache 2.0 license. It is designed \
+ to enable massive network automation through programmatic extension, \
+ while still supporting standard management interfaces and protocols \
+ (e.g. NetFlow, sFlow, SPAN, RSPAN, CLI, LACP, 802.1ag) \
+ "
+
+HOMEPAGE = "http://openvswitch.org/"
+SECTION = "networking"
+LICENSE = "Apache-2"
+
+DEPENDS += "bridge-utils openssl python perl"
+
+RDEPENDS_${PN} += "util-linux-uuidgen util-linux-libuuid coreutils \
+ python perl perl-module-strict ${PN}-switch \
+ bash"
+RDEPENDS_${PN}-testcontroller = "${PN} lsb ${PN}-pki"
+RDEPENDS_${PN}-switch = "${PN} openssl procps util-linux-uuidgen"
+RDEPENDS_${PN}-pki = "${PN}"
+RDEPENDS_${PN}-brcompat = "${PN} ${PN}-switch"
+
+# Some installers will fail because of an install order based on
+# rdeps. E.g. ovs-pki calls sed in the postinstall. sed may be
+# queued for install later.
+RDEPENDS_${PN} += "sed gawk grep"
+
+SRC_URI = "\
+ file://openvswitch-switch \
+ file://openvswitch-switch-setup \
+ file://openvswitch-testcontroller \
+ file://openvswitch-testcontroller-setup \
+ file://openvswitch-add-target-python-handling.patch \
+ file://openvswitch-add-target-perl-handling.patch \
+ "
+
+EXTRA_OECONF += "\
+ TARGET_PYTHON=${bindir}/python \
+ TARGET_PERL=${bindir}/perl \
+ "
+CONFIGUREOPT_DEPTRACK = ""
+
+# Don't compile kernel modules by default since it heavily depends on
+# kernel version. Use the in-kernel module for now.
+# distro layers can enable with EXTRA_OECONF_pn-openvswitch += ""
+# EXTRA_OECONF = "--with-linux=${STAGING_KERNEL_DIR} KARCH=${TARGET_ARCH}"
+
+ALLOW_EMPTY_${PN}-pki = "1"
+PACKAGES =+ "${PN}-testcontroller ${PN}-switch ${PN}-brcompat ${PN}-pki"
+
+FILES_${PN}-testcontroller = "\
+ ${sysconfdir}/init.d/openvswitch-testcontroller \
+ ${sysconfdir}/default/openvswitch-testcontroller \
+ ${sysconfdir}/openvswitch-testcontroller \
+ ${bindir}/ovs-testcontroller \
+ "
+
+FILES_${PN}-brcompat = "${sbindir}/ovs-brcompatd"
+
+FILES_${PN}-switch = "\
+ ${sysconfdir}/init.d/openvswitch-switch \
+ ${sysconfdir}/default/openvswitch-switch \
+ ${systemd_unitdir}/system/openvswitch.service \
+ ${systemd_unitdir}/system/openvswitch-nonetwork.service \
+ "
+
+# silence a warning
+FILES_${PN} += "${datadir}/ovsdbmonitor"
+FILES_${PN} += "/run"
+
+inherit autotools update-rc.d systemd
+
+SYSTEMD_PACKAGES = "${PN}-switch"
+SYSTEMD_SERVICE_${PN}-switch = " \
+ openvswitch-nonetwork.service \
+ openvswitch-switch.service \
+"
+
+INITSCRIPT_PACKAGES = "${PN}-switch ${PN}-testcontroller"
+INITSCRIPT_NAME_${PN}-switch = "openvswitch-switch"
+INITSCRIPT_PARAMS_${PN}-switch = "defaults 71"
+
+INITSCRIPT_NAME_${PN}-testcontroller = "openvswitch-testcontroller"
+INITSCRIPT_PARAMS_${PN}-testcontroller = "defaults 72"
+
+do_install_append() {
+ install -d ${D}/${sysconfdir}/default/
+ install -m 660 ${WORKDIR}/openvswitch-switch-setup ${D}/${sysconfdir}/default/openvswitch-switch
+ install -d ${D}/${sysconfdir}/openvswitch-testcontroller
+ install -m 660 ${WORKDIR}/openvswitch-testcontroller-setup ${D}/${sysconfdir}/default/openvswitch-testcontroller
+
+ install -d ${D}/${sysconfdir}/init.d/
+ install -m 755 ${WORKDIR}/openvswitch-testcontroller ${D}/${sysconfdir}/init.d/openvswitch-testcontroller
+ install -m 755 ${WORKDIR}/openvswitch-switch ${D}/${sysconfdir}/init.d/openvswitch-switch
+ true || rm -fr ${D}/${datadir}/${PN}/pki
+
+ install -d ${D}/${systemd_unitdir}/system/
+ install -m 644 ${S}/rhel/usr_lib_systemd_system_openvswitch.service \
+ ${D}/${systemd_unitdir}/system/openvswitch-switch.service
+ install -m 644 ${S}/rhel/usr_lib_systemd_system_openvswitch-nonetwork.service \
+ ${D}/${systemd_unitdir}/system/openvswitch-nonetwork.service
+
+ oe_runmake modules_install INSTALL_MOD_PATH=${D}
+}
+
+pkg_postinst_${PN}-pki () {
+ # can't do this offline
+ if [ "x$D" != "x" ]; then
+ exit 1
+ fi
+ if test ! -d $D/${datadir}/${PN}/pki; then
+ ovs-pki init --dir=$D/${datadir}/${PN}/pki
+ fi
+}
+
+pkg_postinst_${PN}-testcontroller () {
+ # can't do this offline
+ if [ "x$D" != "x" ]; then
+ exit 1
+ fi
+
+ if test ! -d $D/${datadir}/${PN}/pki; then
+ ovs-pki init --dir=$D/${datadir}/${PN}/pki
+ fi
+
+ cd $D/${sysconfdir}/openvswitch-testcontroller
+ if ! test -e cacert.pem; then
+ ln -s $D/${datadir}/${PN}/pki/switchca/cacert.pem cacert.pem
+ fi
+ if ! test -e privkey.pem || ! test -e cert.pem; then
+ oldumask=$(umask)
+ umask 077
+ ovs-pki req+sign --dir=$D/${datadir}/${PN}/pki tmp controller >/dev/null
+ mv tmp-privkey.pem privkey.pem
+ mv tmp-cert.pem cert.pem
+ mv tmp-req.pem req.pem
+ chmod go+r cert.pem req.pem
+ umask $oldumask
+ fi
+}
diff --git a/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
new file mode 100644
index 0000000..b46d5af
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-networking/openvswitch/openvswitch_git.bb
@@ -0,0 +1,53 @@
+require openvswitch.inc
+
+DEPENDS += "virtual/kernel"
+
+RDEPENDS_${PN}-ptest += "\
+ python-logging python-syslog python-argparse python-io \
+ python-fcntl python-shell python-lang python-xml python-math \
+ python-datetime python-netclient python sed \
+ ldd perl-module-socket perl-module-carp perl-module-exporter \
+ perl-module-xsloader python-netserver python-threading \
+ python-resource python-subprocess \
+ "
+
+S = "${WORKDIR}/git"
+PV = "2.5.0+${SRCREV}"
+
+FILESEXTRAPATHS_append := "${THISDIR}/${PN}-git:"
+
+SRCREV = "8c0b419a0b9ac0141d6973dcc80306dfc6a83d31"
+SRC_URI += "\
+ git://github.com/openvswitch/ovs.git;protocol=git;branch=branch-2.5 \
+ file://openvswitch-add-more-target-python-substitutions.patch \
+ file://openvswitch-add-ptest-${SRCREV}.patch \
+ file://run-ptest \
+ file://disable_m4_check.patch \
+ file://kernel_module.patch \
+ file://openvswitch-ptest-Fix-python-path.patch \
+ "
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=247d8817aece26b21a8cd6791b3ea994"
+
+PACKAGECONFIG ?= ""
+PACKAGECONFIG[dpdk] = "--with-dpdk=${STAGING_DIR_TARGET}/opt/dpdk/${TARGET_ARCH}-native-linuxapp-gcc,,dpdk,"
+
+# Don't compile kernel modules by default since it heavily depends on
+# kernel version. Use the in-kernel module for now.
+# distro layers can enable with EXTRA_OECONF_pn-openvswitch += ""
+# EXTRA_OECONF += "--with-linux=${STAGING_KERNEL_BUILDDIR} --with-linux-source=${STAGING_KERNEL_DIR} KARCH=${TARGET_ARCH}"
+
+# silence a warning
+FILES_${PN} += "/lib/modules"
+
+inherit ptest
+
+EXTRA_OEMAKE += "TEST_DEST=${D}${PTEST_PATH} TEST_ROOT=${PTEST_PATH}"
+
+do_install_ptest() {
+ oe_runmake test-install
+}
+
+do_install_append() {
+ oe_runmake modules_install INSTALL_MOD_PATH=${D}
+}
diff --git a/import-layers/meta-virtualization/recipes-support/dnsmasq/dnsmasq_2.%.bbappend b/import-layers/meta-virtualization/recipes-support/dnsmasq/dnsmasq_2.%.bbappend
new file mode 100644
index 0000000..8d66ca6
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-support/dnsmasq/dnsmasq_2.%.bbappend
@@ -0,0 +1,5 @@
+# dnsmasq is greedy with interfaces by default using bind-dynamic will
+# make it less greedy but still function as it did by default.
+do_install_append() {
+ sed -i '/#bind-interfaces/a # Play nice with libvirt\nbind-dynamic' ${D}${sysconfdir}/dnsmasq.conf
+}