Squashed 'import-layers/meta-virtualization/' content from commit c4a1711

Change-Id: I42132e4f0aef12ec265e74d95f489a6409e22f46
git-subtree-dir: import-layers/meta-virtualization
git-subtree-split: c4a1711dd31659b027c70c07e4ef6da98591ac95
Signed-off-by: Patrick Williams <patrick@stwcx.xyz>
diff --git a/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb
new file mode 100644
index 0000000..3ca5238
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/cgroup-lite_1.1.bb
@@ -0,0 +1,22 @@
+SECTION = "devel"
+SUMMARY = "Light-weight package to set up cgroups at system boot."
+DESCRIPTION =  "Light-weight package to set up cgroups at system boot."
+HOMEPAGE = "http://packages.ubuntu.com/source/precise/cgroup-lite"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://debian/copyright;md5=5d5da4e0867cf06014f87102154d0102"
+SRC_URI = "http://archive.ubuntu.com/ubuntu/pool/main/c/cgroup-lite/cgroup-lite_1.1.tar.gz"
+SRC_URI += "file://cgroups-init"
+SRC_URI[md5sum] = "041a0d8ad2b192271a2e5507fdb6809f"
+SRC_URI[sha256sum] = "e7f9992b90b5b4634f3b8fb42580ff28ff31093edb297ab872c37f61a94586bc"
+
+inherit update-rc.d
+
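+# update-rc.d parameters: start the cgroups-init script at priority 8 in
+# runlevels 2-5 and stop it at priority 20 in runlevels 0, 1 and 6.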
+INITSCRIPT_NAME = "cgroups-init"
+INITSCRIPT_PARAMS = "start 8 2 3 4 5 . stop 20 0 1 6 ."
+do_install() {
+	install -d ${D}/bin
+	install -d ${D}${sysconfdir}/init.d
+	install -m 0755 ${S}/scripts/cgroups-mount ${D}/bin
+	install -m 0755 ${S}/scripts/cgroups-umount ${D}/bin
+	install -m 0755 ${WORKDIR}/cgroups-init ${D}${sysconfdir}/init.d/cgroups-init
+}
diff --git a/import-layers/meta-virtualization/recipes-containers/cgroup-lite/files/cgroups-init b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/files/cgroups-init
new file mode 100755
index 0000000..e504024
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/cgroup-lite/files/cgroups-init
@@ -0,0 +1,27 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides:          cgroups mount
+# Required-Start:    $network $remote_fs
+# Required-Stop:     $network $remote_fs
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: mount/unmount cgroups
+### END INIT INFO
+
+# must start before libvirtd is run
+case "$1" in
+  start)
+        echo -n "Mounting cgroups..."
+        /bin/cgroups-mount
+        echo "Done"
+        ;;
+  stop)
+        echo -n "Unmounting cgroups..."
+        /bin/cgroups-umount
+        echo "Done"
+        ;;
+  *)
+        echo "Usage: /etc/init.d/cgroups-init {start|stop}"
+        exit 1
+        ;;
+esac
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
new file mode 100644
index 0000000..48bcdc2
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/criu_git.bb
@@ -0,0 +1,71 @@
+SUMMARY = "CRIU"
+DESCRIPTION = "Checkpoint/Restore In Userspace, or CRIU, is a software tool for \
+Linux operating system. Using this tool, you can freeze a running application \
+(or part of it) and checkpoint it to a hard drive as a collection of files. \
+You can then use the files to restore and run the application from the point \
+it was frozen at. The distinctive feature of the CRIU project is that it is \
+mainly implemented in user space"
+HOMEPAGE = "http://criu.org"
+SECTION = "console/tools"
+LICENSE = "GPLv2"
+
+EXCLUDE_FROM_WORLD = "1"
+
+LIC_FILES_CHKSUM = "file://COPYING;md5=5cc804625b8b491b6b4312f0c9cb5efa"
+
+SRCREV = "4c5b23e52c1dc4e3fbbc7472b92e7b1ce9d22f02"
+PR = "r0"
+PV = "1.6+git${SRCPV}"
+
+SRC_URI = "git://github.com/xemul/criu.git;protocol=git \
+	   file://0001-criu-Fix-toolchain-hardcode.patch \
+	   file://0002-criu-Skip-documentation-install.patch \
+       file://0001-criu-Change-libraries-install-directory.patch \
+	  "
+
+COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
+
+DEPENDS += "protobuf-c-native protobuf-c"
+
+S = "${WORKDIR}/git"
+
+#
+# On ARM, CRIU can only be built for ARMv6 and ARMv7, so the Makefile
+# checks whether ARCH is ARMv7 or ARMv6.
+# ARM BSPs need to set the CRIU_BUILD_ARCH variable in order to build CRIU.
+#
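+# For example, an ARM machine configuration or bbappend could set the
+# variable like this (hypothetical value; use whatever matches the BSP):
+#   CRIU_BUILD_ARCH = "arm"
+#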
+EXTRA_OEMAKE_arm += "ARCH=${CRIU_BUILD_ARCH} WERROR=0"
+EXTRA_OEMAKE_x86-64 += "ARCH=${TARGET_ARCH} WERROR=0"
+EXTRA_OEMAKE_aarch64 += "ARCH=${TARGET_ARCH} WERROR=0"
+
+EXTRA_OEMAKE_append += "SBINDIR=${sbindir} LIBDIR=${libdir} INCLUDEDIR=${includedir} PIEGEN=no"
+EXTRA_OEMAKE_append += "LOGROTATEDIR=${sysconfdir} SYSTEMDUNITDIR=${systemd_unitdir}"
+
+CFLAGS += "-D__USE_GNU -D_GNU_SOURCE"
+
+# override LDFLAGS to allow criu to build without: "x86_64-poky-linux-ld: unrecognized option '-Wl,-O1'"
+export LDFLAGS=""
+
+export BUILD_SYS
+export HOST_SYS
+
+inherit setuptools
+
+do_compile_prepend() {
+    rm -rf ${S}/protobuf/google/protobuf/descriptor.proto
+    ln -s  ${PKG_CONFIG_SYSROOT_DIR}/usr/include/google/protobuf/descriptor.proto ${S}/protobuf/google/protobuf/descriptor.proto
+}
+
+do_compile () {
+	oe_runmake
+}
+
+do_install () {
+    oe_runmake PREFIX=${exec_prefix} LIBDIR=${libdir} DESTDIR="${D}" install
+}
+
+FILES_${PN} += "${systemd_unitdir}/ \
+            ${libdir}/python2.7/site-packages/ \
+            ${libdir}/pycriu/ \
+            ${libdir}/crit-0.0.1-py2.7.egg-info \
+            "
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
new file mode 100644
index 0000000..28d638b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Change-libraries-install-directory.patch
@@ -0,0 +1,48 @@
+From cb9933dc34af0b4d52c4584332600114ac65c402 Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Tue, 4 Aug 2015 17:45:51 +0800
+Subject: [PATCH] criu: Change libraries install directory
+
+Install the libraries into /usr/lib(/usr/lib64)
+
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+---
+ Makefile     | 2 +-
+ Makefile.inc | 9 ---------
+ 2 files changed, 1 insertion(+), 10 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 7f5c890..6dbc436 100644
+--- a/Makefile
++++ b/Makefile
+@@ -351,7 +351,7 @@ install-man:
+ 
+ install-crit: crit
+ 	$(E) "  INSTALL crit"
+-	$(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX)
++	$(Q) python scripts/crit-setup.py install --root=$(DESTDIR) --prefix=$(PREFIX) --install-lib=$(LIBDIR)
+ 
+ .PHONY: install install-man install-crit install-criu
+ 
+diff --git a/Makefile.inc b/Makefile.inc
+index 5496f41..ba70aea 100644
+--- a/Makefile.inc
++++ b/Makefile.inc
+@@ -17,14 +17,5 @@ MANDIR		:= $(PREFIX)/share/man
+ SYSTEMDUNITDIR	:= $(PREFIX)/lib/systemd/system/
+ LOGROTATEDIR	:= $(PREFIX)/etc/logrotate.d/
+ LIBDIR		:= $(PREFIX)/lib
+-# For recent Debian/Ubuntu with multiarch support
+-DEB_HOST_MULTIARCH ?= $(shell dpkg-architecture \
+-			-qDEB_HOST_MULTIARCH 2>/dev/null)
+-ifneq "$(DEB_HOST_MULTIARCH)" ""
+-LIBDIR		:= $(PREFIX)/lib/$(DEB_HOST_MULTIARCH)
+-# For most other systems
+-else ifeq "$(shell uname -m)" "x86_64"
+-LIBDIR		:= $(PREFIX)/lib64
+-endif
+ 
+ INCLUDEDIR	:= $(PREFIX)/include/criu
+-- 
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
new file mode 100644
index 0000000..2fabe0a
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-criu-Fix-toolchain-hardcode.patch
@@ -0,0 +1,46 @@
+From 3d4f112fdb434712eba09239a468842323f1af4c Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@windriver.com>
+Date: Tue, 26 Aug 2014 14:42:42 -0700
+Subject: [PATCH 1/2] criu: Fix toolchain hardcode
+
+Replace ":=" to "?=" so that the toolchain used by bitbake build system will
+be taken.
+
+Signed-off-by: Yang Shi <yang.shi@windriver.com>
+Signed-off-by: Nam Ninh <nam.ninh@windriver.com>
+---
+ Makefile | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index f1c8784..43252ec 100644
+--- a/Makefile
++++ b/Makefile
+@@ -23,15 +23,15 @@ export VERSION_SO_MAJOR VERSION_SO_MINOR
+ # Common definitions
+ #
+ 
+-FIND		:= find
+-CSCOPE		:= cscope
+-RM		:= rm -f
+-LD		:= $(CROSS_COMPILE)ld
+-CC		:= $(CROSS_COMPILE)gcc
+-NM		:= $(CROSS_COMPILE)nm
+-SH		:= bash
+-MAKE		:= make
+-OBJCOPY		:= $(CROSS_COMPILE)objcopy
++FIND		?= find
++CSCOPE		?= cscope
++RM		?= rm -f
++LD		?= $(CROSS_COMPILE)ld
++CC		?= $(CROSS_COMPILE)gcc
++NM		?= $(CROSS_COMPILE)nm
++SH		?= bash
++MAKE		?= make
++OBJCOPY		?= $(CROSS_COMPILE)objcopy
+ 
+ CFLAGS		+= $(USERCFLAGS)
+ 
+-- 
+2.0.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch
new file mode 100644
index 0000000..b6fbf01
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch
@@ -0,0 +1,46 @@
+From 81bc5928cdc1b432656eb6590967306d8cf3ac9d Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Tue, 4 Aug 2015 10:22:21 +0800
+Subject: [PATCH] protobuf-c: Remove the rules which depend on the native
+ command
+
+These rules do not work for cross-compilation, since the protoc-c and
+cxx-generate-packed-data commands must run on the build host to generate local files.
+
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+---
+ Makefile.am | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/Makefile.am b/Makefile.am
+index 310aa09..0602e96 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -148,17 +148,18 @@ t_generated_code2_cxx_generate_packed_data_CXXFLAGS = \
+ t_generated_code2_cxx_generate_packed_data_LDADD = \
+ 	$(protobuf_LIBS)
+ 
+-t/test.pb-c.c t/test.pb-c.h: $(top_builddir)/protoc-c/protoc-c$(EXEEXT) $(top_srcdir)/t/test.proto
+-	$(AM_V_GEN)$(top_builddir)/protoc-c/protoc-c$(EXEEXT) -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test.proto
++t/test.pb-c.c t/test.pb-c.h: $(top_srcdir)/t/test.proto
++	$(AM_V_GEN)protoc-c -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test.proto
+ 
+-t/test-full.pb-c.c t/test-full.pb-c.h: $(top_builddir)/protoc-c/protoc-c$(EXEEXT) $(top_srcdir)/t/test-full.proto
+-	$(AM_V_GEN)$(top_builddir)/protoc-c/protoc-c$(EXEEXT) -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
++t/test-full.pb-c.c t/test-full.pb-c.h: $(top_srcdir)/t/test-full.proto
++	$(AM_V_GEN)protoc-c -I$(top_srcdir) --c_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
+ 
+ t/test-full.pb.cc t/test-full.pb.h: @PROTOC@ $(top_srcdir)/t/test-full.proto
+ 	$(AM_V_GEN)@PROTOC@ -I$(top_srcdir) --cpp_out=$(top_builddir) $(top_srcdir)/t/test-full.proto
+ 
+-t/generated-code2/test-full-cxx-output.inc: t/generated-code2/cxx-generate-packed-data$(EXEEXT)
+-	$(AM_V_GEN)$(top_builddir)/t/generated-code2/cxx-generate-packed-data$(EXEEXT) > $(top_builddir)/t/generated-code2/test-full-cxx-output.inc
++t/generated-code2/test-full-cxx-output.inc: 
++	mkdir -p $(top_builddir)/t/generated-code2
++	$(AM_V_GEN)cxx-generate-packed-data > $(top_builddir)/t/generated-code2/test-full-cxx-output.inc
+ 
+ BUILT_SOURCES += \
+ 	t/test.pb-c.c t/test.pb-c.h \
+-- 
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
new file mode 100644
index 0000000..eaf8160
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/0002-criu-Skip-documentation-install.patch
@@ -0,0 +1,29 @@
+From e9c2a94b9eb37ad24672b10caa398bd18282b962 Mon Sep 17 00:00:00 2001
+From: Yang Shi <yang.shi@windriver.com>
+Date: Tue, 26 Aug 2014 14:44:51 -0700
+Subject: [PATCH 2/2] criu: Skip documentation install
+
+asciidoc is needed to generate the CRIU documentation, so skip the documentation install.
+
+Signed-off-by: Yang Shi <yang.shi@windriver.com>
+Signed-off-by: Nam Ninh <nam.ninh@windriver.com>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index 43252ec..e25edcc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -265,7 +265,7 @@ install: $(PROGRAM) install-man
+ 	$(Q) install -m 644 scripts/logrotate.d/criu-service $(DESTDIR)$(LOGROTATEDIR)
+ 
+ install-man:
+-	$(Q) $(MAKE) -C Documentation install
++#	$(Q) $(MAKE) -C Documentation install
+ 
+ .PHONY: install install-man
+ 
+-- 
+2.0.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch
new file mode 100644
index 0000000..ef60fc0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/Omit-google-apputils-dependency.patch
@@ -0,0 +1,25 @@
+From f8b7c90f6da90b67bdd7d5301894c5c28bd9d076 Mon Sep 17 00:00:00 2001
+From: Jianchuan Wang <jianchuan.wang@windriver.com>
+Date: Mon, 10 Aug 2015 11:23:31 +0800
+Subject: [PATCH] Omit google-apputils dependency
+
+Signed-off-by: Jianchuan Wang <jianchuan.wang@windriver.com>
+---
+ python/setup.py | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/python/setup.py b/python/setup.py
+index 2450a77..6f6bffb 100755
+--- a/python/setup.py
++++ b/python/setup.py
+@@ -189,7 +189,6 @@ if __name__ == '__main__':
+           'google.protobuf.text_format'],
+         cmdclass = { 'clean': clean, 'build_py': build_py },
+         install_requires = ['setuptools'],
+-        setup_requires = ['google-apputils'],
+         ext_modules = ext_module_list,
+         url = 'https://developers.google.com/protocol-buffers/',
+         maintainer = maintainer_email,
+-- 
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch
new file mode 100644
index 0000000..dac8942
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/disable_tests.patch
@@ -0,0 +1,19 @@
+diff -Naur protobuf-c-0.15.old/src/Makefile.am protobuf-c-0.15/src/Makefile.am
+--- protobuf-c-0.15.old/src/Makefile.am	2012-11-28 14:59:57.845251943 +0100
++++ protobuf-c-0.15/src/Makefile.am	2012-11-28 15:00:23.549252632 +0100
+@@ -1,5 +1,5 @@
+ if BUILD_PROTOC_C
+-SUBDIRS = . test
++
+ bin_PROGRAMS = protoc-c
+ protoc_c_SOURCES = \
+ google/protobuf/compiler/c/c_service.cc \
+@@ -23,7 +23,7 @@
+ lib_LTLIBRARIES = libprotobuf-c.la
+ protobufcincludedir = $(includedir)/google/protobuf-c
+ 
+-EXTRA_DIST = CMakeLists.txt test/CMakeLists.txt
++EXTRA_DIST = CMakeLists.txt 
+ 
+ libprotobuf_c_la_SOURCES = \
+ google/protobuf-c/protobuf-c-dispatch.c \
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch b/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch
new file mode 100644
index 0000000..13d4e84
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/protobuf-allow-running-python-scripts-from-anywhere.patch
@@ -0,0 +1,38 @@
+From 46e331263eb92e47510e88478b255f226d30245c Mon Sep 17 00:00:00 2001
+From: Keith Holman <Keith.Holman@windriver.com>
+Date: Mon, 18 Aug 2014 15:19:35 -0400
+Subject: [PATCH] protobuf: allow running python scripts from anywhere
+
+The Makefile to generate the examples with Google Protocol Buffers
+generates some scripts for python.  However, these generated scripts
+only work if they are run in the same directory as the source files.
+This fix generates scripts to execute from anywhere on the system.
+
+Signed-off-by: Keith Holman <Keith.Holman@windriver.com>
+---
+ examples/Makefile | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/examples/Makefile b/examples/Makefile
+index 8dc9083..a993d63 100644
+--- a/examples/Makefile
++++ b/examples/Makefile
+@@ -48,11 +48,13 @@ list_people_java: javac_middleman
+ add_person_python: add_person.py protoc_middleman
+ 	@echo "Writing shortcut script add_person_python..."
+ 	@echo '#! /bin/sh' > add_person_python
+-	@echo './add_person.py "$$@"' >> add_person_python
++	@echo 'SCRIPT_DIR=$$(dirname $$0)' >> add_person_python
++	@echo '$$SCRIPT_DIR/add_person.py "$$@"' >> add_person_python
+ 	@chmod +x add_person_python
+ 
+ list_people_python: list_people.py protoc_middleman
+ 	@echo "Writing shortcut script list_people_python..."
+ 	@echo '#! /bin/sh' > list_people_python
+-	@echo './list_people.py "$$@"' >> list_people_python
++	@echo 'SCRIPT_DIR=$$(dirname $$0)' >> list_people_python
++	@echo '$$SCRIPT_DIR/list_people.py "$$@"' >> list_people_python
+ 	@chmod +x list_people_python
+-- 
+1.9.3
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest b/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest
new file mode 100755
index 0000000..a5a7b0f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/files/run-ptest
@@ -0,0 +1,32 @@
+#!/bin/bash
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TEST_FILE="/tmp/test.data"
+
+RETVAL=0
+# Test every writing test application
+for write_exe_full_path in ${DIR}/add_person_*; do
+	if [ -x "${write_exe_full_path}" ]; then
+		write_exe=`basename ${write_exe_full_path}`
+		echo "Generating new test file using ${write_exe}..."
+		${write_exe_full_path} "${TEST_FILE}"
+		RETVAL=$?
+
+		# Test every reading test application
+		for read_exe_full_path in ${DIR}/list_people_*; do
+			read_exe=`basename ${read_exe_full_path}`
+			echo "Test: Write with ${write_exe}; Read with ${read_exe}..."
+			if [ -x "${read_exe_full_path}" ]; then
+				${read_exe_full_path} "${TEST_FILE}"
+				RETVAL=$?
+			fi
+		done
+
+		# Cleanup...
+		if [ -e "${TEST_FILE}" ]; then
+			rm "${TEST_FILE}"
+		fi
+	fi
+done
+
+exit $RETVAL
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb
new file mode 100644
index 0000000..0d03ebe
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-c_1.1.1.bb
@@ -0,0 +1,28 @@
+SUMMARY = "protobuf-c"
+DESCRIPTION = "This package provides a code generator and runtime libraries to use Protocol Buffers from pure C"
+HOMEPAGE = "http://code.google.com/p/protobuf-c/"
+SECTION = "console/tools"
+LICENSE = "Apache-2.0"
+
+LIC_FILES_CHKSUM = "file://protobuf-c/protobuf-c.c;endline=28;md5=0feb44cc63eacef97219b0174967492f"
+
+COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
+
+DEPENDS = "protobuf protobuf-c-native"
+
+SRC_URI[md5sum] = "41d437677ea16f9d3611d98841c4af3b"
+SRC_URI[sha256sum] = "09c5bb187b7a8e86bc0ff860f7df86370be9e8661cdb99c1072dcdab0763562c"
+SRC_URI = "https://github.com/protobuf-c/protobuf-c/releases/download/v1.1.1/protobuf-c-1.1.1.tar.gz "
+SRC_URI_append_class-target ="file://0001-protobuf-c-Remove-the-rules-which-depend-on-the-nati.patch"
+
+inherit autotools pkgconfig
+
+BBCLASSEXTEND = "native nativesdk"
+
+do_configure_prepend_class-target() {
+    export PKG_CONFIG_PATH="${STAGING_LIBDIR_NATIVE}/pkgconfig:${PKG_CONFIG_PATH}"
+}
+
+do_install_append_class-native() {
+    install -m 755 ${B}/t/generated-code2/cxx-generate-packed-data ${D}/${bindir}
+}
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb
new file mode 100644
index 0000000..e88c9e7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/protobuf-native_2.6.1.bb
@@ -0,0 +1,21 @@
+SUMMARY = "protobuf"
+DESCRIPTION = "Protocol Buffers are a way of encoding structured data in \
+an efficient yet extensible format. Google uses Protocol Buffers for \
+almost all of its internal RPC protocols and file formats."
+HOMEPAGE = "http://code.google.com/p/protobuf/"
+SECTION = "console/tools"
+LICENSE = "BSD-3-Clause"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=af6809583bfde9a31595a58bb4a24514"
+
+PR = "r0"
+
+SRC_URI[md5sum] = "af05b2cca289f7b86eef2734a0cdc8b9"
+SRC_URI[sha256sum] = "2667b7cda4a6bc8a09e5463adf3b5984e08d94e72338277affa8594d8b6e5cd1"
+SRC_URI = "https://github.com/google/protobuf/archive/v2.6.1.tar.gz;downloadfilename=protobuf-2.6.1.tar.gz \
+	"
+
+EXTRA_OECONF += " --with-protoc=echo --disable-shared"
+
+inherit native autotools
+
diff --git a/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb b/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb
new file mode 100644
index 0000000..1b7ab20
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/criu/protobuf_2.6.1.bb
@@ -0,0 +1,97 @@
+SUMMARY = "protobuf"
+DESCRIPTION = "Protocol Buffers are a way of encoding structured data in \
+an efficient yet extensible format. Google uses Protocol Buffers for \
+almost all of its internal RPC protocols and file formats."
+HOMEPAGE = "http://code.google.com/p/protobuf/"
+SECTION = "console/tools"
+LICENSE = "BSD-3-Clause"
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=af6809583bfde9a31595a58bb4a24514"
+
+PR = "r0"
+EXCLUDE_FROM_WORLD = "1"
+
+SRC_URI[md5sum] = "af05b2cca289f7b86eef2734a0cdc8b9"
+SRC_URI[sha256sum] = "2667b7cda4a6bc8a09e5463adf3b5984e08d94e72338277affa8594d8b6e5cd1"
+SRC_URI = "https://github.com/google/protobuf/archive/v2.6.1.tar.gz;downloadfilename=protobuf-2.6.1.tar.gz\
+	file://protobuf-allow-running-python-scripts-from-anywhere.patch \
+	file://Omit-google-apputils-dependency.patch \
+	file://run-ptest"
+
+COMPATIBLE_HOST = "(x86_64|arm|aarch64).*-linux"
+
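+# Use the protoc provided by protobuf-native from the native sysroot so the
+# cross build never tries to execute a target protoc binary.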
+EXTRA_OECONF += " --with-protoc=${STAGING_BINDIR_NATIVE}/protoc"
+inherit autotools setuptools ptest
+
+DEPENDS += "protobuf-native"
+
+PYTHON_SRC_DIR="python"
+TEST_SRC_DIR="examples"
+LANG_SUPPORT="cpp python"
+
+do_compile() {
+	# Compile protoc compiler
+	base_do_compile
+}
+
+do_compile_ptest() {
+	# Modify makefile to use the cross-compiler
+	sed -e "s|c++|${CXX}|g" -i "${S}/${TEST_SRC_DIR}/Makefile"
+
+	mkdir -p "${B}/${TEST_SRC_DIR}"
+
+	# Add the location of the cross-compiled header and library files
+	# which haven't been installed yet.
+	cp "${B}/protobuf.pc" "${B}/${TEST_SRC_DIR}/protobuf.pc"
+	sed -e 's|libdir=|libdir=${PKG_CONFIG_SYSROOT_DIR}|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
+	sed -e 's|Cflags:|Cflags: -I${S}/src|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
+	sed -e 's|Libs:|Libs: -L${B}/src/.libs|' -i "${B}/${TEST_SRC_DIR}/protobuf.pc"
+	export PKG_CONFIG_PATH="${B}/${TEST_SRC_DIR}"
+
+	# Save the pkgcfg sysroot variable, and update it to nothing so
+	# that it doesn't append the sysroot to the beginning of paths.
+	# The header and library files aren't installed to the target
+	# system yet.  So the absolute paths were specified above.
+	save_pkg_config_sysroot_dir=$PKG_CONFIG_SYSROOT_DIR
+	export PKG_CONFIG_SYSROOT_DIR=
+
+	# Compile the tests
+	for lang in ${LANG_SUPPORT}; do
+		oe_runmake -C "${S}/${TEST_SRC_DIR}" ${lang}
+	done
+
+	# Restore the pkgconfig sysroot variable
+	export PKG_CONFIG_SYSROOT_DIR=$save_pkg_config_sysroot_dir
+}
+
+do_install() {
+	local olddir=`pwd`
+
+	# Install protoc compiler
+	autotools_do_install
+
+	# Install header files
+	export PROTOC="${STAGING_BINDIR_NATIVE}/protoc"
+	cd "${S}/${PYTHON_SRC_DIR}"
+	distutils_do_install
+
+	cd "$olddir"
+}
+
+do_install_ptest() {
+	local olddir=`pwd`
+
+	cd "${S}/${TEST_SRC_DIR}"
+	install -d "${D}/${PTEST_PATH}"
+	for i in add_person* list_people*; do
+		if [ -x "$i" ]; then
+			install "$i" "${D}/${PTEST_PATH}"
+		fi
+	done
+	cp "${S}/${TEST_SRC_DIR}/addressbook_pb2.py" "${D}/${PTEST_PATH}"
+	
+	cd "$olddir"
+}
+
+BBCLASSEXTEND = "nativesdk"
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb b/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
new file mode 100644
index 0000000..0320440
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/docker-registry_git.bb
@@ -0,0 +1,93 @@
+HOMEPAGE = "https://github.com/docker/docker-registry"
+SUMMARY = "Registry server for Docker"
+DESCRIPTION = "\
+ This is the classic Python docker-registry. \
+ . \
+ It provides hosting and delivery of repositories and images. \
+ "
+
+SRCREV = "fd8c0c114985547b69088e0f1526e58bfe2ff914"
+SRC_URI = "\
+	git://github.com/docker/docker-registry.git \
+	file://docker-registry.conf \
+	file://docker-registry.service \
+	file://config.yml \
+	file://change_sqlalchemy_rqt.patch \
+	"
+
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=35e8e5305c1b7b4a5761f9de5d44e5f4"
+
+S = "${WORKDIR}/git"
+
+PV = "0.9.1+git${SRCREV}"
+
+RDEPENDS_${PN} += "\
+  docker \
+  gunicorn (>= 19.1.1) \
+  python-pip \
+  python-distribute \
+  python-m2crypto (>= 0.22.3) \
+  python-pyyaml (>= 3.11) \
+  python-flask (>= 0.10.1) \
+  python-gevent (>= 1.0.1) \
+  python-requests \
+  python-sqlalchemy (>= 0.9.4) \
+  python-blinker (>= 1.3) \
+  python-backports-lzma (>= 0.0.3) \
+  python-flask-cors (>= 1.10.3) \
+  python-bugsnag (>= 2.0.2) \
+  python-docker-registry-core (>= 2.0.3) \
+  python-newrelic (>= 2.22.0.19) \
+  python-itsdangerous (>= 0.21) \
+  python-jinja2 (>= 2.4) \
+  python-werkzeug (>= 0.7) \
+  python-simplejson (>= 3.6.2) \
+  python-redis (>= 2.10.3) \
+  python-boto (>= 2.34.0) \
+  python-webob \
+  "
+# OFFICIAL REQ:
+# docker-registry-core>=2,<3
+# blinker==1.3
+# backports.lzma==0.0.3,!=0.0.4
+
+# Flask==0.10.1
+# gevent==1.0.1
+# gunicorn==19.1.1
+# PyYAML==3.11
+# requests==2.3.0
+# M2Crypto==0.22.3
+# sqlalchemy==0.9.4
+# setuptools==5.8
+# 
+# [bugsnag]
+# bugsnag>=2.0,<2.1
+# 
+# [cors]
+# Flask-cors>=1.8,<2.0
+# 
+# [newrelic]
+# newrelic>=2.22,<2.23
+
+
+inherit setuptools systemd
+
+SYSTEMD_PACKAGES = "${@base_contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@base_contains('DISTRO_FEATURES','systemd','docker-registry.service','',d)}"
+
+do_install_append() {
+	mkdir -p ${D}/etc/default/
+	cp ${WORKDIR}/docker-registry.conf ${D}/etc/default/docker-registry
+
+	if ${@base_contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+		install -d ${D}${systemd_unitdir}/system
+		install -m 644 ${WORKDIR}/docker-registry.service ${D}/${systemd_unitdir}/system
+		sed -i "s|#WORKDIR#|${PYTHON_SITEPACKAGES_DIR}/docker_registry|" ${D}/${systemd_unitdir}/system/docker-registry.service
+	fi
+	# based on config_mirror.yml - uses /var/docker-registry instead of /tmp for files
+	install ${WORKDIR}/config.yml ${D}/etc/docker-registry.yml
+	mkdir -p ${D}/var/docker-registry
+}
+
+FILES_${PN} += "/etc/default /var/docker-registry /etc/ /etc/default/volatiles"
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/change_sqlalchemy_rqt.patch b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/change_sqlalchemy_rqt.patch
new file mode 100644
index 0000000..75cbd6d
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/change_sqlalchemy_rqt.patch
@@ -0,0 +1,13 @@
+---
+ requirements/main.txt |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/requirements/main.txt
++++ b/requirements/main.txt
+@@ -5,5 +5,5 @@
+ PyYAML==3.11
+ requests==2.3.0
+ M2Crypto==0.22.3
+-sqlalchemy==0.9.4
++sqlalchemy>=0.9.4
+ setuptools==5.8
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/config.yml b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/config.yml
new file mode 100644
index 0000000..8b33766
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/config.yml
@@ -0,0 +1,228 @@
+# All other flavors inherit the `common' config snippet
+common: &common
+    issue: '"docker-registry server"'
+    # Default log level is info
+    loglevel: _env:LOGLEVEL:info
+    # Enable debugging (additional information in the output of the _ping endpoint)
+    debug: _env:DEBUG:false
+    # By default, the registry acts standalone (eg: doesn't query the index)
+    standalone: _env:STANDALONE:true
+    # The default endpoint to use (if NOT standalone) is index.docker.io
+    index_endpoint: _env:INDEX_ENDPOINT:https://index.docker.io
+    # Storage redirect is disabled
+    storage_redirect: _env:STORAGE_REDIRECT
+    # Token auth is enabled (if NOT standalone)
+    disable_token_auth: _env:DISABLE_TOKEN_AUTH
+    # No priv key
+    privileged_key: _env:PRIVILEGED_KEY
+    # No search backend
+    search_backend: _env:SEARCH_BACKEND
+    # SQLite search backend
+    sqlalchemy_index_database: _env:SQLALCHEMY_INDEX_DATABASE:sqlite:////var/docker-registry/docker-registry.db
+
+    # Mirroring is not enabled
+    mirroring:
+        source: _env:MIRROR_SOURCE # https://registry-1.docker.io
+        source_index: _env:MIRROR_SOURCE_INDEX # https://index.docker.io
+        tags_cache_ttl: _env:MIRROR_TAGS_CACHE_TTL:172800 # seconds
+
+    cache:
+        host: _env:CACHE_REDIS_HOST
+        port: _env:CACHE_REDIS_PORT
+        db: _env:CACHE_REDIS_DB:0
+        password: _env:CACHE_REDIS_PASSWORD
+
+    # Enabling LRU cache for small files
+    # This speeds up read/write on small files
+    # when using a remote storage backend (like S3).
+    cache_lru:
+        host: _env:CACHE_LRU_REDIS_HOST
+        port: _env:CACHE_LRU_REDIS_PORT
+        db: _env:CACHE_LRU_REDIS_DB:0
+        password: _env:CACHE_LRU_REDIS_PASSWORD
+
+    # Enabling these options makes the Registry send an email on each code Exception
+    email_exceptions:
+        smtp_host: _env:SMTP_HOST
+        smtp_port: _env:SMTP_PORT:25
+        smtp_login: _env:SMTP_LOGIN
+        smtp_password: _env:SMTP_PASSWORD
+        smtp_secure: _env:SMTP_SECURE:false
+        from_addr: _env:SMTP_FROM_ADDR:docker-registry@localdomain.local
+        to_addr: _env:SMTP_TO_ADDR:noise+dockerregistry@localdomain.local
+
+    # Enable bugsnag (set the API key)
+    bugsnag: _env:BUGSNAG
+
+    # CORS support is not enabled by default
+    cors:
+        origins: _env:CORS_ORIGINS
+        methods: _env:CORS_METHODS
+        headers: _env:CORS_HEADERS:[Content-Type]
+        expose_headers: _env:CORS_EXPOSE_HEADERS
+        supports_credentials: _env:CORS_SUPPORTS_CREDENTIALS
+        max_age: _env:CORS_MAX_AGE
+        send_wildcard: _env:CORS_SEND_WILDCARD
+        always_send: _env:CORS_ALWAYS_SEND
+        automatic_options: _env:CORS_AUTOMATIC_OPTIONS
+        vary_header: _env:CORS_VARY_HEADER
+        resources: _env:CORS_RESOURCES
+
+local: &local
+    <<: *common
+    storage: local
+    storage_path: _env:STORAGE_PATH:/var/docker-registry
+
+
+s3: &s3
+    <<: *common
+    storage: s3
+    s3_region: _env:AWS_REGION
+    s3_bucket: _env:AWS_BUCKET
+    boto_bucket: _env:AWS_BUCKET
+    storage_path: _env:STORAGE_PATH:/registry
+    s3_encrypt: _env:AWS_ENCRYPT:true
+    s3_secure: _env:AWS_SECURE:true
+    s3_access_key: _env:AWS_KEY
+    s3_secret_key: _env:AWS_SECRET
+    s3_use_sigv4: _env:AWS_USE_SIGV4
+    boto_host: _env:AWS_HOST
+    boto_port: _env:AWS_PORT
+    boto_calling_format: _env:AWS_CALLING_FORMAT
+
+cloudfronts3: &cloudfronts3
+    <<: *s3
+    cloudfront:
+        base: _env:CF_BASE_URL
+        keyid: _env:CF_KEYID
+        keysecret: _env:CF_KEYSECRET
+
+azureblob: &azureblob
+    <<: *common
+    storage: azureblob
+    azure_storage_account_name: _env:AZURE_STORAGE_ACCOUNT_NAME
+    azure_storage_account_key: _env:AZURE_STORAGE_ACCOUNT_KEY
+    azure_storage_container: _env:AZURE_STORAGE_CONTAINER:registry
+    azure_use_https: _env:AZURE_USE_HTTPS:true
+
+# Ceph Object Gateway Configuration
+# See http://ceph.com/docs/master/radosgw/ for details on installing this service.
+ceph-s3: &ceph-s3
+    <<: *common
+    storage: s3
+    s3_region: ~
+    s3_bucket: _env:AWS_BUCKET
+    s3_encrypt: _env:AWS_ENCRYPT:false
+    s3_secure: _env:AWS_SECURE:false
+    storage_path: _env:STORAGE_PATH:/registry
+    s3_access_key: _env:AWS_KEY
+    s3_secret_key: _env:AWS_SECRET
+    boto_bucket: _env:AWS_BUCKET
+    boto_host: _env:AWS_HOST
+    boto_port: _env:AWS_PORT
+    boto_debug: _env:AWS_DEBUG:0
+    boto_calling_format: _env:AWS_CALLING_FORMAT
+
+# Google Cloud Storage Configuration
+# See:
+# https://developers.google.com/storage/docs/reference/v1/getting-startedv1#keys
+# for details on access and secret keys.
+gcs:
+    <<: *common
+    storage: gcs
+    boto_bucket: _env:GCS_BUCKET
+    storage_path: _env:STORAGE_PATH:/registry
+    gs_secure: _env:GCS_SECURE:true
+    gs_access_key: _env:GCS_KEY
+    gs_secret_key: _env:GCS_SECRET
+    # OAuth 2.0 authentication with the storage.
+    # oauth2 can be set to true or false. If it is set to true, gs_access_key,
+    # gs_secret_key and gs_secure are not needed.
+    # Client ID and Client Secret must be set into OAUTH2_CLIENT_ID and
+    # OAUTH2_CLIENT_SECRET environment variables.
+    # See: https://developers.google.com/accounts/docs/OAuth2.
+    oauth2: _env:GCS_OAUTH2:false
+
+# This flavor is for storing images in Openstack Swift
+swift: &swift
+    <<: *common
+    storage: swift
+    storage_path: _env:STORAGE_PATH:/registry
+    # keystone authorization
+    swift_authurl: _env:OS_AUTH_URL
+    swift_container: _env:OS_CONTAINER
+    swift_user: _env:OS_USERNAME
+    swift_password: _env:OS_PASSWORD
+    swift_tenant_name: _env:OS_TENANT_NAME
+    swift_region_name: _env:OS_REGION_NAME
+
+# This flavor stores the images in Glance (to integrate with openstack)
+# See also: https://github.com/docker/openstack-docker
+glance: &glance
+    <<: *common
+    storage: glance
+    storage_alternate: _env:GLANCE_STORAGE_ALTERNATE:file
+    storage_path: _env:STORAGE_PATH:/var/docker-registry
+
+openstack:
+    <<: *glance
+
+# This flavor stores the images in Glance (to integrate with openstack)
+# and tags in Swift.
+glance-swift: &glance-swift
+    <<: *swift
+    storage: glance
+    storage_alternate: swift
+
+openstack-swift:
+    <<: *glance-swift
+
+elliptics:
+    <<: *common
+    storage: elliptics
+    elliptics_nodes: _env:ELLIPTICS_NODES
+    elliptics_wait_timeout: _env:ELLIPTICS_WAIT_TIMEOUT:60
+    elliptics_check_timeout: _env:ELLIPTICS_CHECK_TIMEOUT:60
+    elliptics_io_thread_num: _env:ELLIPTICS_IO_THREAD_NUM:2
+    elliptics_net_thread_num: _env:ELLIPTICS_NET_THREAD_NUM:2
+    elliptics_nonblocking_io_thread_num: _env:ELLIPTICS_NONBLOCKING_IO_THREAD_NUM:2
+    elliptics_groups: _env:ELLIPTICS_GROUPS
+    elliptics_verbosity: _env:ELLIPTICS_VERBOSITY:4
+    elliptics_logfile: _env:ELLIPTICS_LOGFILE:/dev/stderr
+    elliptics_addr_family: _env:ELLIPTICS_ADDR_FAMILY:2
+
+# This flavor stores the images in Aliyun OSS
+# See:
+# https://i.aliyun.com/access_key/
+# for details on access and secret keys.
+oss: &oss
+    <<: *common
+    storage: oss
+    storage_path: _env:STORAGE_PATH:/registry/
+    oss_host: _env:OSS_HOST
+    oss_bucket: _env:OSS_BUCKET
+    oss_accessid: _env:OSS_KEY
+    oss_accesskey: _env:OSS_SECRET
+
+
+
+# This is the default configuration when no flavor is specified
+dev: &dev
+    <<: *local
+    loglevel: _env:LOGLEVEL:debug
+    debug: _env:DEBUG:true
+    search_backend: _env:SEARCH_BACKEND:sqlalchemy
+
+# This flavor is used by unit tests
+test:
+    <<: *dev
+    index_endpoint: https://registry-stage.hub.docker.com
+    standalone: true
+    storage_path: _env:STORAGE_PATH:./tmp/test
+
+# To specify another flavor, set the environment variable SETTINGS_FLAVOR
+# $ export SETTINGS_FLAVOR=prod
+prod:
+    <<: *s3
+    storage_path: _env:STORAGE_PATH:/prod
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.conf b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.conf
new file mode 100644
index 0000000..940ece1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.conf
@@ -0,0 +1,19 @@
+# The Docker registry configuration file
+DOCKER_REGISTRY_CONFIG=/etc/docker-registry.yml
+
+# The configuration to use from DOCKER_REGISTRY_CONFIG file
+SETTINGS_FLAVOR=local
+
+# Address to bind the registry to
+REGISTRY_ADDRESS=0.0.0.0
+
+# Port to bind the registry to
+REGISTRY_PORT=5000
+
+# Number of workers to handle the connections
+GUNICORN_WORKERS=4
+
+STANDALONE=true
+
+MIRROR_SOURCE=https://registry-1.docker.io
+MIRROR_SOURCE_INDEX=https://index.docker.io
diff --git a/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.service b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.service
new file mode 100644
index 0000000..4f4cfe7
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker-registry/files/docker-registry.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Registry server for Docker
+After=docker.service
+Requires=docker.service
+
+[Service]
+Type=simple
+Environment=DOCKER_REGISTRY_CONFIG=/etc/docker-registry.yml
+EnvironmentFile=-/etc/default/docker-registry
+WorkingDirectory=#WORKDIR#
+ExecStart=/usr/bin/gunicorn --access-logfile /var/log/docker-registry-access.log --error-logfile /var/log/docker-registry-error.log --debug --max-requests 100 --graceful-timeout 3600 -t 3600 -k gevent -b ${REGISTRY_ADDRESS}:${REGISTRY_PORT} -w ${GUNICORN_WORKERS} docker_registry.wsgi:application
+Restart=on-failure
+
+[Install]
+WantedBy=multi-user.target
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
new file mode 100644
index 0000000..42a336e
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/docker_git.bb
@@ -0,0 +1,147 @@
+HOMEPAGE = "http://www.docker.com"
+SUMMARY = "Linux container runtime"
+DESCRIPTION = "Linux container runtime \
+ Docker complements kernel namespacing with a high-level API which \
+ operates at the process level. It runs unix processes with strong \
+ guarantees of isolation and repeatability across servers. \
+ . \
+ Docker is a great building block for automating distributed systems: \
+ large-scale web deployments, database clusters, continuous deployment \
+ systems, private PaaS, service-oriented architectures, etc. \
+ . \
+ This package contains the daemon and client. Using docker.io on non-amd64 \
+ hosts is not supported at this time. Please be careful when using it \
+ on anything besides amd64. \
+ . \
+ Also, note that kernel version 3.8 or above is required for proper \
+ operation of the daemon process, and that any lower versions may have \
+ subtle and/or glaring issues. \
+ "
+
+SRCREV = "76d6bc9a9f1690e16f3721ba165364688b626de2"
+SRC_URI = "\
+	git://github.com/docker/docker.git;nobranch=1 \
+	file://docker.service \
+	file://docker.init \
+	file://hi.Dockerfile \
+	file://disable_sha1sum_startup.patch \
+	file://Bump-bolt-to-v1.1.0.patch \
+	"
+
+# Apache-2.0 for docker
+LICENSE = "Apache-2.0"
+LIC_FILES_CHKSUM = "file://LICENSE;md5=cc2221abf0b96ea39dd68141b70f7937"
+
+S = "${WORKDIR}/git"
+
+DOCKER_VERSION = "1.9.0"
+PV = "${DOCKER_VERSION}+git${SRCREV}"
+
+DEPENDS = "go-cross \
+    go-cli \
+    go-pty \
+    go-context \
+    go-mux \
+    go-patricia \
+    go-libtrust \
+    go-logrus \
+    go-fsnotify \
+    go-dbus \
+    go-capability \
+    go-systemd \
+    btrfs-tools \
+    sqlite3 \
+    go-distribution-digest \
+    "
+
+DEPENDS_append_class-target = "lvm2"
+RDEPENDS_${PN} = "curl aufs-util git cgroup-lite util-linux iptables"
+RRECOMMENDS_${PN} = "lxc docker-registry rt-tests"
+RRECOMMENDS_${PN} += " kernel-module-dm-thin-pool kernel-module-nf-nat"
+DOCKER_PKG="github.com/docker/docker"
+
+do_configure[noexec] = "1"
+
+do_compile() {
+	export GOARCH="${TARGET_ARCH}"
+	# supported: amd64, 386, arm, arm64
+	if [ "${TARGET_ARCH}" = "x86_64" ]; then
+		export GOARCH="amd64"
+	fi
+	if [ "${TARGET_ARCH}" = "aarch64" ]; then
+		export GOARCH="arm64"
+	fi
+
+	# Set GOPATH. See 'PACKAGERS.md'. Don't rely on
+	# docker to download its dependencies but rather
+	# use dependencies packaged independently.
+	cd ${S}
+	rm -rf .gopath
+	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
+	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
+	export GOPATH="${S}/.gopath:${S}/vendor:${STAGING_DIR_TARGET}/${prefix}/local/go"
+	cd -
+
+	# Pass the needed cflags/ldflags so that cgo
+	# can find the needed headers files and libraries
+	export CGO_ENABLED="1"
+	export CGO_CFLAGS="${BUILDSDK_CFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+	export CGO_LDFLAGS="${BUILDSDK_LDFLAGS} --sysroot=${STAGING_DIR_TARGET}"
+	# in order to exclude devicemapper and btrfs - https://github.com/docker/docker/issues/14056
+	export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs exclude_graphdriver_devicemapper'
+
+	# this is the unsupported build method that
+	# doesn't rely on an existing docker binary
+	# to build this:
+	DOCKER_GITCOMMIT="${SRCREV}" \
+	  ./hack/make.sh dynbinary
+}
+
+inherit systemd update-rc.d
+
+SYSTEMD_PACKAGES = "${@base_contains('DISTRO_FEATURES','systemd','${PN}','',d)}"
+SYSTEMD_SERVICE_${PN} = "${@base_contains('DISTRO_FEATURES','systemd','docker.service','',d)}"
+
+INITSCRIPT_PACKAGES += "${@base_contains('DISTRO_FEATURES','sysvinit','${PN}','',d)}"
+INITSCRIPT_NAME_${PN} = "${@base_contains('DISTRO_FEATURES','sysvinit','docker.init','',d)}"
+INITSCRIPT_PARAMS_${PN} = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+
+do_install() {
+	mkdir -p ${D}/${bindir}
+	cp ${S}/bundles/${DOCKER_VERSION}/dynbinary/docker-${DOCKER_VERSION} \
+	  ${D}/${bindir}/docker
+	cp ${S}/bundles/${DOCKER_VERSION}/dynbinary/dockerinit-${DOCKER_VERSION} \
+	  ${D}/${bindir}/dockerinit
+
+	if ${@base_contains('DISTRO_FEATURES','systemd','true','false',d)}; then
+		install -d ${D}${systemd_unitdir}/system
+		install -m 644 ${S}/contrib/init/systemd/docker.* ${D}/${systemd_unitdir}/system
+		# replaces one copied from above with one that uses the local registry for a mirror
+		install -m 644 ${WORKDIR}/docker.service ${D}/${systemd_unitdir}/system
+        else
+            install -d ${D}${sysconfdir}/init.d
+            install -m 0755 ${WORKDIR}/docker.init ${D}${sysconfdir}/init.d/docker.init
+	fi
+
+	mkdir -p ${D}/usr/share/docker/
+	cp ${WORKDIR}/hi.Dockerfile ${D}/usr/share/docker/
+}
+
+inherit useradd
+USERADD_PACKAGES = "${PN}"
+GROUPADD_PARAM_${PN} = "-r docker"
+
+FILES_${PN} += "/lib/systemd/system/*"
+
+# DO NOT STRIP docker and dockerinit!!!
+#
+# Reason:
+# The "docker" package contains two binaries: "docker" and "dockerinit",
+# which are both written in Go. The "dockerinit" binary is built first,
+# then its checksum is given to the build process compiling the "docker"
+# binary. Hence the checksum of the unstripped "dockerinit" binary is hard
+# coded into the "docker" binary. At runtime the "docker" binary invokes
+# the "dockerinit" binary, but before doing that it ensures the checksum
+# of "dockerinit" matches with the hard coded value.
+#
+INHIBIT_PACKAGE_STRIP = "1"
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch
new file mode 100644
index 0000000..ca4ad81
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/Bump-bolt-to-v1.1.0.patch
@@ -0,0 +1,1828 @@
+From a41917c2c88bd7f694d141ac67f4a194aaa16fa1 Mon Sep 17 00:00:00 2001
+From: Qiang Huang <h.huangqiang@huawei.com>
+Date: Wed, 28 Oct 2015 08:49:45 +0800
+Subject: [PATCH] Bump bolt to v1.1.0
+
+It adds ARM64, ppc64le, s390x, solaris support, and a bunch of
+bugfixes.
+
+Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
+---
+ hack/vendor.sh                                     |   2 +-
+ vendor/src/github.com/boltdb/bolt/.gitignore       |   1 +
+ vendor/src/github.com/boltdb/bolt/README.md        | 250 +++++++++++++++++++--
+ vendor/src/github.com/boltdb/bolt/batch.go         | 138 ++++++++++++
+ vendor/src/github.com/boltdb/bolt/bolt_386.go      |   5 +-
+ vendor/src/github.com/boltdb/bolt/bolt_amd64.go    |   3 +
+ vendor/src/github.com/boltdb/bolt/bolt_arm.go      |   5 +-
+ vendor/src/github.com/boltdb/bolt/bolt_arm64.go    |   9 +
+ vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go  |   9 +
+ vendor/src/github.com/boltdb/bolt/bolt_s390x.go    |   9 +
+ vendor/src/github.com/boltdb/bolt/bolt_unix.go     |  37 ++-
+ .../github.com/boltdb/bolt/bolt_unix_solaris.go    | 101 +++++++++
+ vendor/src/github.com/boltdb/bolt/bolt_windows.go  |  10 +-
+ vendor/src/github.com/boltdb/bolt/bucket.go        |  29 ++-
+ vendor/src/github.com/boltdb/bolt/cursor.go        |  12 +-
+ vendor/src/github.com/boltdb/bolt/db.go            | 195 ++++++++++++----
+ vendor/src/github.com/boltdb/bolt/errors.go        |   4 +
+ vendor/src/github.com/boltdb/bolt/freelist.go      |  28 ++-
+ vendor/src/github.com/boltdb/bolt/node.go          |  36 ++-
+ vendor/src/github.com/boltdb/bolt/page.go          |  45 +++-
+ vendor/src/github.com/boltdb/bolt/tx.go            |  80 +++++--
+ 21 files changed, 886 insertions(+), 122 deletions(-)
+ create mode 100644 vendor/src/github.com/boltdb/bolt/batch.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm64.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_s390x.go
+ create mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
+
+diff --git a/hack/vendor.sh b/hack/vendor.sh
+index d872d4a..c28e677 100755
+--- a/hack/vendor.sh
++++ b/hack/vendor.sh
+@@ -36,7 +36,7 @@ clone git github.com/coreos/etcd v2.2.0
+ fix_rewritten_imports github.com/coreos/etcd
+ clone git github.com/ugorji/go 5abd4e96a45c386928ed2ca2a7ef63e2533e18ec
+ clone git github.com/hashicorp/consul v0.5.2
+-clone git github.com/boltdb/bolt v1.0
++clone git github.com/boltdb/bolt v1.1.0
+ 
+ # get graph and distribution packages
+ clone git github.com/docker/distribution 20c4b7a1805a52753dfd593ee1cc35558722a0ce # docker/1.9 branch
+diff --git a/vendor/src/github.com/boltdb/bolt/.gitignore b/vendor/src/github.com/boltdb/bolt/.gitignore
+index b2bb382..c7bd2b7 100644
+--- a/vendor/src/github.com/boltdb/bolt/.gitignore
++++ b/vendor/src/github.com/boltdb/bolt/.gitignore
+@@ -1,3 +1,4 @@
+ *.prof
+ *.test
++*.swp
+ /bin/
+diff --git a/vendor/src/github.com/boltdb/bolt/README.md b/vendor/src/github.com/boltdb/bolt/README.md
+index 727e977..0a33ebc 100644
+--- a/vendor/src/github.com/boltdb/bolt/README.md
++++ b/vendor/src/github.com/boltdb/bolt/README.md
+@@ -16,7 +16,7 @@ and setting values. That's it.
+ 
+ ## Project Status
+ 
+-Bolt is stable and the API is fixed. Full unit test coverage and randomized 
++Bolt is stable and the API is fixed. Full unit test coverage and randomized
+ black box testing are used to ensure database consistency and thread safety.
+ Bolt is currently in high-load production environments serving databases as
+ large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
+@@ -87,6 +87,11 @@ are not thread safe. To work with data in multiple goroutines you must start
+ a transaction for each one or use locking to ensure only one goroutine accesses
+ a transaction at a time. Creating transaction from the `DB` is thread safe.
+ 
++Read-only transactions and read-write transactions should not depend on one
++another and generally shouldn't be opened simultaneously in the same goroutine.
++This can cause a deadlock as the read-write transaction needs to periodically
++re-map the data file but it cannot do so while a read-only transaction is open.
++
+ 
+ #### Read-write transactions
+ 
+@@ -120,12 +125,88 @@ err := db.View(func(tx *bolt.Tx) error {
+ })
+ ```
+ 
+-You also get a consistent view of the database within this closure, however, 
++You also get a consistent view of the database within this closure, however,
+ no mutating operations are allowed within a read-only transaction. You can only
+ retrieve buckets, retrieve values, and copy the database within a read-only
+ transaction.
+ 
+ 
++#### Batch read-write transactions
++
++Each `DB.Update()` waits for disk to commit the writes. This overhead
++can be minimized by combining multiple updates with the `DB.Batch()`
++function:
++
++```go
++err := db.Batch(func(tx *bolt.Tx) error {
++	...
++	return nil
++})
++```
++
++Concurrent Batch calls are opportunistically combined into larger
++transactions. Batch is only useful when there are multiple goroutines
++calling it.
++
++The trade-off is that `Batch` can call the given
++function multiple times, if parts of the transaction fail. The
++function must be idempotent and side effects must take effect only
++after a successful return from `DB.Batch()`.
++
++For example: don't display messages from inside the function, instead
++set variables in the enclosing scope:
++
++```go
++var id uint64
++err := db.Batch(func(tx *bolt.Tx) error {
++	// Find last key in bucket, decode as bigendian uint64, increment
++	// by one, encode back to []byte, and add new key.
++	...
++	id = newValue
++	return nil
++})
++if err != nil {
++	return ...
++}
++fmt.Println("Allocated ID %d", id)
++```
++
++
++#### Managing transactions manually
++
++The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
++function. These helper functions will start the transaction, execute a function,
++and then safely close your transaction if an error is returned. This is the
++recommended way to use Bolt transactions.
++
++However, sometimes you may want to manually start and end your transactions.
++You can use the `Tx.Begin()` function directly but _please_ be sure to close the
++transaction.
++
++```go
++// Start a writable transaction.
++tx, err := db.Begin(true)
++if err != nil {
++    return err
++}
++defer tx.Rollback()
++
++// Use the transaction...
++_, err := tx.CreateBucket([]byte("MyBucket"))
++if err != nil {
++    return err
++}
++
++// Commit the transaction and check for error.
++if err := tx.Commit(); err != nil {
++    return err
++}
++```
++
++The first argument to `DB.Begin()` is a boolean stating if the transaction
++should be writable.
++
++
+ ### Using buckets
+ 
+ Buckets are collections of key/value pairs within the database. All keys in a
+@@ -175,13 +256,61 @@ db.View(func(tx *bolt.Tx) error {
+ ```
+ 
+ The `Get()` function does not return an error because its operation is
+-guarenteed to work (unless there is some kind of system failure). If the key
++guaranteed to work (unless there is some kind of system failure). If the key
+ exists then it will return its byte slice value. If it doesn't exist then it
+ will return `nil`. It's important to note that you can have a zero-length value
+ set to a key which is different than the key not existing.
+ 
+ Use the `Bucket.Delete()` function to delete a key from the bucket.
+ 
++Please note that values returned from `Get()` are only valid while the
++transaction is open. If you need to use a value outside of the transaction
++then you must use `copy()` to copy it to another byte slice.
++
++
++### Autoincrementing integer for the bucket
++By using the NextSequence() function, you can let Bolt determine a sequence
++which can be used as the unique identifier for your key/value pairs. See the
++example below.
++
++```go
++// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
++func (s *Store) CreateUser(u *User) error {
++    return s.db.Update(func(tx *bolt.Tx) error {
++        // Retrieve the users bucket.
++        // This should be created when the DB is first opened.
++        b := tx.Bucket([]byte("users"))
++
++        // Generate ID for the user.
++        // This returns an error only if the Tx is closed or not writeable.
++        // That can't happen in an Update() call so I ignore the error check.
++        id, _ = b.NextSequence()
++        u.ID = int(id)
++
++        // Marshal user data into bytes.
++        buf, err := json.Marshal(u)
++        if err != nil {
++            return err
++        }
++
++        // Persist bytes to users bucket.
++        return b.Put(itob(u.ID), buf)
++    })
++}
++
++// itob returns an 8-byte big endian representation of v.
++func itob(v int) []byte {
++    b := make([]byte, 8)
++    binary.BigEndian.PutUint64(b, uint64(v))
++    return b
++}
++
++type User struct {
++    ID int
++    ...
++}
++
++```
+ 
+ ### Iterating over keys
+ 
+@@ -254,7 +383,7 @@ db.View(func(tx *bolt.Tx) error {
+ 	max := []byte("2000-01-01T00:00:00Z")
+ 
+ 	// Iterate over the 90's.
+-	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) != -1; k, v = c.Next() {
++	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+ 		fmt.Printf("%s: %s\n", k, v)
+ 	}
+ 
+@@ -294,7 +423,7 @@ func (*Bucket) DeleteBucket(key []byte) error
+ 
+ ### Database backups
+ 
+-Bolt is a single file so it's easy to backup. You can use the `Tx.Copy()`
++Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
+ function to write a consistent view of the database to a writer. If you call
+ this from a read-only transaction, it will perform a hot backup and not block
+ your other database reads and writes. It will also use `O_DIRECT` when available
+@@ -305,11 +434,12 @@ do database backups:
+ 
+ ```go
+ func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+-	err := db.View(func(tx bolt.Tx) error {
++	err := db.View(func(tx *bolt.Tx) error {
+ 		w.Header().Set("Content-Type", "application/octet-stream")
+ 		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+ 		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+-		return tx.Copy(w)
++		_, err := tx.WriteTo(w)
++		return err
+ 	})
+ 	if err != nil {
+ 		http.Error(w, err.Error(), http.StatusInternalServerError)
+@@ -351,14 +481,13 @@ go func() {
+ 		// Grab the current stats and diff them.
+ 		stats := db.Stats()
+ 		diff := stats.Sub(&prev)
+-		
++
+ 		// Encode stats to JSON and print to STDERR.
+ 		json.NewEncoder(os.Stderr).Encode(diff)
+ 
+ 		// Save stats for the next loop.
+ 		prev = stats
+ 	}
+-}
+ }()
+ ```
+ 
+@@ -366,25 +495,83 @@ It's also useful to pipe these stats to a service such as statsd for monitoring
+ or to provide an HTTP endpoint that will perform a fixed-length sample.
+ 
+ 
++### Read-Only Mode
++
++Sometimes it is useful to create a shared, read-only Bolt database. To this,
++set the `Options.ReadOnly` flag when opening your database. Read-only mode
++uses a shared lock to allow multiple processes to read from the database but
++it will block any processes from opening the database in read-write mode.
++
++```go
++db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
++if err != nil {
++	log.Fatal(err)
++}
++```
++
++
+ ## Resources
+ 
+ For more information on getting started with Bolt, check out the following articles:
+ 
+ * [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
++* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
++
++
++## Comparison with other databases
++
++### Postgres, MySQL, & other relational databases
++
++Relational databases structure data into rows and are only accessible through
++the use of SQL. This approach provides flexibility in how you store and query
++your data but also incurs overhead in parsing and planning SQL statements. Bolt
++accesses all data by a byte slice key. This makes Bolt fast to read and write
++data by key but provides no built-in support for joining values together.
++
++Most relational databases (with the exception of SQLite) are standalone servers
++that run separately from your application. This gives your systems
++flexibility to connect multiple application servers to a single database
++server but also adds overhead in serializing and transporting data over the
++network. Bolt runs as a library included in your application so all data access
++has to go through your application's process. This brings data closer to your
++application but limits multi-process access to the data.
++
++
++### LevelDB, RocksDB
+ 
++LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
++they are libraries bundled into the application, however, their underlying
++structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
++random writes by using a write-ahead log and multi-tiered, sorted files called
++SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
++have trade-offs.
+ 
++If you require a high random write throughput (>10,000 w/sec) or you need to use
++spinning disks then LevelDB could be a good choice. If your application is
++read-heavy or does a lot of range scans then Bolt could be a good choice.
+ 
+-## Comparing Bolt to LMDB
++One other important consideration is that LevelDB does not have transactions.
++It supports batch writing of key/value pairs and it supports read snapshots
++but it will not give you the ability to do a compare-and-swap operation safely.
++Bolt supports fully serializable ACID transactions.
++
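++For example, a compare-and-swap can be written directly inside an `Update`
++transaction. This is only an illustrative sketch; the bucket, key, and value
++names are made up:
++
++```go
++err := db.Update(func(tx *bolt.Tx) error {
++	b, err := tx.CreateBucketIfNotExists([]byte("locks"))
++	if err != nil {
++		return err
++	}
++	// The read and the conditional write happen inside one serializable
++	// transaction, so no other writer can interleave between them.
++	if v := b.Get([]byte("owner")); v != nil {
++		return fmt.Errorf("already owned by %s", v)
++	}
++	return b.Put([]byte("owner"), []byte("worker-1"))
++})
++```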
++
++### LMDB
+ 
+ Bolt was originally a port of LMDB so it is architecturally similar. Both use
+-a B+tree, have ACID semanetics with fully serializable transactions, and support
++a B+tree, have ACID semantics with fully serializable transactions, and support
+ lock-free MVCC using a single writer and multiple readers.
+ 
+ The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+ while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+-several unsafe actions such as direct writes and append writes for the sake of
+-performance. Bolt opts to disallow actions which can leave the database in a 
+-corrupted state. The only exception to this in Bolt is `DB.NoSync`.
++several unsafe actions such as direct writes for the sake of performance. Bolt
++opts to disallow actions which can leave the database in a corrupted state. The
++only exception to this in Bolt is `DB.NoSync`.
++
++There are also a few differences in API. LMDB requires a maximum mmap size when
++opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
++automatically. LMDB overloads the getter and setter functions with multiple
++flags whereas Bolt splits these specialized cases into their own functions.
+ 
+ 
+ ## Caveats & Limitations
+@@ -425,14 +612,33 @@ Here are a few things to note when evaluating and using Bolt:
+   can in memory and will release memory as needed to other processes. This means
+   that Bolt can show very high memory usage when working with large databases.
+   However, this is expected and the OS will release memory as needed. Bolt can
+-  handle databases much larger than the available physical RAM.
++  handle databases much larger than the available physical RAM, provided its
++  memory-map fits in the process virtual address space. It may be problematic
++  on 32-bit systems.
++
++* The data structures in the Bolt database are memory mapped so the data file
++  will be endian specific. This means that you cannot copy a Bolt file from a
++  little endian machine to a big endian machine and have it work. For most 
++  users this is not a concern since most modern CPUs are little endian.
++
++* Because of the way pages are laid out on disk, Bolt cannot truncate data files
++  and return free pages back to the disk. Instead, Bolt maintains a free list
++  of unused pages within its data file. These free pages can be reused by later
++  transactions. This works well for many use cases as databases generally tend
++  to grow. However, it's important to note that deleting large chunks of data
++  will not allow you to reclaim that space on disk.
++
++  For more information on page allocation, [see this comment][page-allocation];
++  a copy-based workaround is sketched below.
++
++[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
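++
++If reclaiming space matters, one workaround is to copy the live data into a
++fresh database file. The following is only a minimal sketch of that idea; the
++function name is made up and it handles a single level of buckets only:
++
++```go
++// compactBucket copies every key/value pair from one top-level bucket of src
++// into dst. Nested buckets are skipped in this simplified version.
++func compactBucket(src, dst *bolt.DB, name []byte) error {
++	return src.View(func(stx *bolt.Tx) error {
++		sb := stx.Bucket(name)
++		if sb == nil {
++			return nil // nothing to copy
++		}
++		return dst.Update(func(dtx *bolt.Tx) error {
++			dstBucket, err := dtx.CreateBucketIfNotExists(name)
++			if err != nil {
++				return err
++			}
++			return sb.ForEach(func(k, v []byte) error {
++				if v == nil {
++					return nil // nested bucket; not handled here
++				}
++				return dstBucket.Put(k, v)
++			})
++		})
++	})
++}
++```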
+ 
+ 
+ ## Other Projects Using Bolt
+ 
+ Below is a list of public, open source projects that use Bolt:
+ 
+-* [Bazil](https://github.com/bazillion/bazil) - A file system that lets your data reside where it is most convenient for it to reside.
++* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
++* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+ * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+ * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+ * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+@@ -450,6 +656,16 @@ Below is a list of public, open source projects that use Bolt:
+ * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+ * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+ * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
++* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read.
++* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics.
++* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
++* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
++* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
++* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
++* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
++* [stow](https://github.com/djherbis/stow) -  a persistence manager for objects
++  backed by boltdb.
++* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
++  simple tx and key scans.
+ 
+ If you are using Bolt in a project please send a pull request to add it to the list.
+-
+diff --git a/vendor/src/github.com/boltdb/bolt/batch.go b/vendor/src/github.com/boltdb/bolt/batch.go
+new file mode 100644
+index 0000000..84acae6
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/batch.go
+@@ -0,0 +1,138 @@
++package bolt
++
++import (
++	"errors"
++	"fmt"
++	"sync"
++	"time"
++)
++
++// Batch calls fn as part of a batch. It behaves similarly to Update,
++// except:
++//
++// 1. concurrent Batch calls can be combined into a single Bolt
++// transaction.
++//
++// 2. the function passed to Batch may be called multiple times,
++// regardless of whether it returns an error or not.
++//
++// This means that Batch function side effects must be idempotent and
++// take permanent effect only after a successful return is seen in the
++// caller.
++//
++// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
++// and DB.MaxBatchDelay, respectively.
++//
++// Batch is only useful when there are multiple goroutines calling it.
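++//
++// Example (illustrative sketch only; the bucket name and variables are
++// made up):
++//
++//	err := db.Batch(func(tx *Tx) error {
++//		return tx.Bucket([]byte("events")).Put(key, value)
++//	})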
++func (db *DB) Batch(fn func(*Tx) error) error {
++	errCh := make(chan error, 1)
++
++	db.batchMu.Lock()
++	if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
++		// There is no existing batch, or the existing batch is full; start a new one.
++		db.batch = &batch{
++			db: db,
++		}
++		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
++	}
++	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
++	if len(db.batch.calls) >= db.MaxBatchSize {
++		// wake up batch, it's ready to run
++		go db.batch.trigger()
++	}
++	db.batchMu.Unlock()
++
++	err := <-errCh
++	if err == trySolo {
++		err = db.Update(fn)
++	}
++	return err
++}
++
++type call struct {
++	fn  func(*Tx) error
++	err chan<- error
++}
++
++type batch struct {
++	db    *DB
++	timer *time.Timer
++	start sync.Once
++	calls []call
++}
++
++// trigger runs the batch if it hasn't already been run.
++func (b *batch) trigger() {
++	b.start.Do(b.run)
++}
++
++// run performs the transactions in the batch and communicates results
++// back to DB.Batch.
++func (b *batch) run() {
++	b.db.batchMu.Lock()
++	b.timer.Stop()
++	// Make sure no new work is added to this batch, but don't break
++	// other batches.
++	if b.db.batch == b {
++		b.db.batch = nil
++	}
++	b.db.batchMu.Unlock()
++
++retry:
++	for len(b.calls) > 0 {
++		var failIdx = -1
++		err := b.db.Update(func(tx *Tx) error {
++			for i, c := range b.calls {
++				if err := safelyCall(c.fn, tx); err != nil {
++					failIdx = i
++					return err
++				}
++			}
++			return nil
++		})
++
++		if failIdx >= 0 {
++			// take the failing transaction out of the batch. it's
++			// safe to shorten b.calls here because db.batch no longer
++			// points to us, and we hold the mutex anyway.
++			c := b.calls[failIdx]
++			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
++			// tell the submitter to re-run it solo and continue with the rest of the batch
++			c.err <- trySolo
++			continue retry
++		}
++
++		// pass success, or bolt internal errors, to all callers
++		for _, c := range b.calls {
++			if c.err != nil {
++				c.err <- err
++			}
++		}
++		break retry
++	}
++}
++
++// trySolo is a special sentinel error value used for signaling that a
++// transaction function should be re-run. It should never be seen by
++// callers.
++var trySolo = errors.New("batch function returned an error and should be re-run solo")
++
++type panicked struct {
++	reason interface{}
++}
++
++func (p panicked) Error() string {
++	if err, ok := p.reason.(error); ok {
++		return err.Error()
++	}
++	return fmt.Sprintf("panic: %v", p.reason)
++}
++
++func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
++	defer func() {
++		if p := recover(); p != nil {
++			err = panicked{p}
++		}
++	}()
++	return fn(tx)
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_386.go b/vendor/src/github.com/boltdb/bolt/bolt_386.go
+index 856f401..e659bfb 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_386.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_386.go
+@@ -1,4 +1,7 @@
+ package bolt
+ 
+ // maxMapSize represents the largest mmap size supported by Bolt.
+-const maxMapSize = 0xFFFFFFF // 256MB
++const maxMapSize = 0x7FFFFFFF // 2GB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0xFFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
+index 4262932..cca6b7e 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
+@@ -2,3 +2,6 @@ package bolt
+ 
+ // maxMapSize represents the largest mmap size supported by Bolt.
+ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm.go b/vendor/src/github.com/boltdb/bolt/bolt_arm.go
+index 856f401..e659bfb 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_arm.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_arm.go
+@@ -1,4 +1,7 @@
+ package bolt
+ 
+ // maxMapSize represents the largest mmap size supported by Bolt.
+-const maxMapSize = 0xFFFFFFF // 256MB
++const maxMapSize = 0x7FFFFFFF // 2GB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0xFFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go
+new file mode 100644
+index 0000000..6d23093
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go
+@@ -0,0 +1,9 @@
++// +build arm64
++
++package bolt
++
++// maxMapSize represents the largest mmap size supported by Bolt.
++const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
+new file mode 100644
+index 0000000..8351e12
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
+@@ -0,0 +1,9 @@
++// +build ppc64le
++
++package bolt
++
++// maxMapSize represents the largest mmap size supported by Bolt.
++const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go
+new file mode 100644
+index 0000000..f4dd26b
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go
+@@ -0,0 +1,9 @@
++// +build s390x
++
++package bolt
++
++// maxMapSize represents the largest mmap size supported by Bolt.
++const maxMapSize = 0xFFFFFFFFFFFF // 256TB
++
++// maxAllocSize is the size used when creating array pointers.
++const maxAllocSize = 0x7FFFFFFF
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix.go b/vendor/src/github.com/boltdb/bolt/bolt_unix.go
+index 95647a7..6eef6b2 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_unix.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_unix.go
+@@ -1,8 +1,9 @@
+-// +build !windows,!plan9
++// +build !windows,!plan9,!solaris
+ 
+ package bolt
+ 
+ import (
++	"fmt"
+ 	"os"
+ 	"syscall"
+ 	"time"
+@@ -10,7 +11,7 @@ import (
+ )
+ 
+ // flock acquires an advisory lock on a file descriptor.
+-func flock(f *os.File, timeout time.Duration) error {
++func flock(f *os.File, exclusive bool, timeout time.Duration) error {
+ 	var t time.Time
+ 	for {
+ 		// If we're beyond our timeout then return an error.
+@@ -20,9 +21,13 @@ func flock(f *os.File, timeout time.Duration) error {
+ 		} else if timeout > 0 && time.Since(t) > timeout {
+ 			return ErrTimeout
+ 		}
++		flag := syscall.LOCK_SH
++		if exclusive {
++			flag = syscall.LOCK_EX
++		}
+ 
+ 		// Otherwise attempt to obtain an exclusive lock.
+-		err := syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
++		err := syscall.Flock(int(f.Fd()), flag|syscall.LOCK_NB)
+ 		if err == nil {
+ 			return nil
+ 		} else if err != syscall.EWOULDBLOCK {
+@@ -41,11 +46,28 @@ func funlock(f *os.File) error {
+ 
+ // mmap memory maps a DB's data file.
+ func mmap(db *DB, sz int) error {
++	// Truncate and fsync to ensure file size metadata is flushed.
++	// https://github.com/boltdb/bolt/issues/284
++	if !db.NoGrowSync && !db.readOnly {
++		if err := db.file.Truncate(int64(sz)); err != nil {
++			return fmt.Errorf("file resize error: %s", err)
++		}
++		if err := db.file.Sync(); err != nil {
++			return fmt.Errorf("file sync error: %s", err)
++		}
++	}
++
++	// Map the data file to memory.
+ 	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
+ 	if err != nil {
+ 		return err
+ 	}
+ 
++	// Advise the kernel that the mmap is accessed randomly.
++	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
++		return fmt.Errorf("madvise: %s", err)
++	}
++
+ 	// Save the original byte slice and convert to a byte array pointer.
+ 	db.dataref = b
+ 	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
+@@ -67,3 +89,12 @@ func munmap(db *DB) error {
+ 	db.datasz = 0
+ 	return err
+ }
++
++// NOTE: This function is copied from stdlib because it is not available on darwin.
++func madvise(b []byte, advice int) (err error) {
++	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
++	if e1 != 0 {
++		err = e1
++	}
++	return
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
+new file mode 100644
+index 0000000..f480ee7
+--- /dev/null
++++ b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
+@@ -0,0 +1,101 @@
++
++package bolt
++
++import (
++	"fmt"
++	"os"
++	"syscall"
++	"time"
++	"unsafe"
++	"golang.org/x/sys/unix"
++)
++
++// flock acquires an advisory lock on a file descriptor.
++func flock(f *os.File, exclusive bool, timeout time.Duration) error {
++	var t time.Time
++	for {
++		// If we're beyond our timeout then return an error.
++		// This can only occur after we've attempted a flock once.
++		if t.IsZero() {
++			t = time.Now()
++		} else if timeout > 0 && time.Since(t) > timeout {
++			return ErrTimeout
++		}
++		var lock syscall.Flock_t
++		lock.Start = 0
++		lock.Len = 0
++		lock.Pid = 0
++		lock.Whence = 0
++		lock.Pid = 0
++		if exclusive {
++			lock.Type = syscall.F_WRLCK
++		} else {
++			lock.Type = syscall.F_RDLCK
++		}
++		err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock)
++		if err == nil {
++			return nil
++		} else if err != syscall.EAGAIN {
++			return err
++		}
++
++		// Wait for a bit and try again.
++		time.Sleep(50 * time.Millisecond)
++	}
++}
++
++// funlock releases an advisory lock on a file descriptor.
++func funlock(f *os.File) error {
++	var lock syscall.Flock_t
++	lock.Start = 0
++	lock.Len = 0
++	lock.Type = syscall.F_UNLCK
++	lock.Whence = 0
++	return syscall.FcntlFlock(uintptr(f.Fd()), syscall.F_SETLK, &lock)
++}
++
++// mmap memory maps a DB's data file.
++func mmap(db *DB, sz int) error {
++	// Truncate and fsync to ensure file size metadata is flushed.
++	// https://github.com/boltdb/bolt/issues/284
++	if !db.NoGrowSync && !db.readOnly {
++		if err := db.file.Truncate(int64(sz)); err != nil {
++			return fmt.Errorf("file resize error: %s", err)
++		}
++		if err := db.file.Sync(); err != nil {
++			return fmt.Errorf("file sync error: %s", err)
++		}
++	}
++
++	// Map the data file to memory.
++	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
++	if err != nil {
++		return err
++	}
++
++	// Advise the kernel that the mmap is accessed randomly.
++	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
++		return fmt.Errorf("madvise: %s", err)
++	}
++
++	// Save the original byte slice and convert to a byte array pointer.
++	db.dataref = b
++	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
++	db.datasz = sz
++	return nil
++}
++
++// munmap unmaps a DB's data file from memory.
++func munmap(db *DB) error {
++	// Ignore the unmap if we have no mapped data.
++	if db.dataref == nil {
++		return nil
++	}
++
++	// Unmap using the original byte slice.
++	err := unix.Munmap(db.dataref)
++	db.dataref = nil
++	db.data = nil
++	db.datasz = 0
++	return err
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/bolt_windows.go b/vendor/src/github.com/boltdb/bolt/bolt_windows.go
+index c8539d4..8b782be 100644
+--- a/vendor/src/github.com/boltdb/bolt/bolt_windows.go
++++ b/vendor/src/github.com/boltdb/bolt/bolt_windows.go
+@@ -16,7 +16,7 @@ func fdatasync(db *DB) error {
+ }
+ 
+ // flock acquires an advisory lock on a file descriptor.
+-func flock(f *os.File, _ time.Duration) error {
++func flock(f *os.File, _ bool, _ time.Duration) error {
+ 	return nil
+ }
+ 
+@@ -28,9 +28,11 @@ func funlock(f *os.File) error {
+ // mmap memory maps a DB's data file.
+ // Based on: https://github.com/edsrzf/mmap-go
+ func mmap(db *DB, sz int) error {
+-	// Truncate the database to the size of the mmap.
+-	if err := db.file.Truncate(int64(sz)); err != nil {
+-		return fmt.Errorf("truncate: %s", err)
++	if !db.readOnly {
++		// Truncate the database to the size of the mmap.
++		if err := db.file.Truncate(int64(sz)); err != nil {
++			return fmt.Errorf("truncate: %s", err)
++		}
+ 	}
+ 
+ 	// Open a file mapping handle.
+diff --git a/vendor/src/github.com/boltdb/bolt/bucket.go b/vendor/src/github.com/boltdb/bolt/bucket.go
+index 2630800..2925288 100644
+--- a/vendor/src/github.com/boltdb/bolt/bucket.go
++++ b/vendor/src/github.com/boltdb/bolt/bucket.go
+@@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor {
+ 
+ // Bucket retrieves a nested bucket by name.
+ // Returns nil if the bucket does not exist.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (b *Bucket) Bucket(name []byte) *Bucket {
+ 	if b.buckets != nil {
+ 		if child := b.buckets[string(name)]; child != nil {
+@@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
+ 
+ // CreateBucket creates a new bucket at the given key and returns the new bucket.
+ // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+ 	if b.tx.db == nil {
+ 		return nil, ErrTxClosed
+@@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
+ 
+ // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+ // Returns an error if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
+ 	child, err := b.CreateBucket(key)
+ 	if err == ErrBucketExists {
+@@ -252,6 +255,7 @@ func (b *Bucket) DeleteBucket(key []byte) error {
+ 
+ // Get retrieves the value for a key in the bucket.
+ // Returns a nil value if the key does not exist or if the key is a nested bucket.
++// The returned value is only valid for the life of the transaction.
+ func (b *Bucket) Get(key []byte) []byte {
+ 	k, v, flags := b.Cursor().seek(key)
+ 
+@@ -332,6 +336,12 @@ func (b *Bucket) NextSequence() (uint64, error) {
+ 		return 0, ErrTxNotWritable
+ 	}
+ 
++	// Materialize the root node if it hasn't been already so that the
++	// bucket will be saved during commit.
++	if b.rootNode == nil {
++		_ = b.node(b.root, nil)
++	}
++
+ 	// Increment and return the sequence.
+ 	b.bucket.sequence++
+ 	return b.bucket.sequence, nil
+@@ -339,7 +349,8 @@ func (b *Bucket) NextSequence() (uint64, error) {
+ 
+ // ForEach executes a function for each key/value pair in a bucket.
+ // If the provided function returns an error then the iteration is stopped and
+-// the error is returned to the caller.
++// the error is returned to the caller. The provided function must not modify
++// the bucket; this will result in undefined behavior.
+ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ 	if b.tx.db == nil {
+ 		return ErrTxClosed
+@@ -511,8 +522,12 @@ func (b *Bucket) spill() error {
+ 		// Update parent node.
+ 		var c = b.Cursor()
+ 		k, _, flags := c.seek([]byte(name))
+-		_assert(bytes.Equal([]byte(name), k), "misplaced bucket header: %x -> %x", []byte(name), k)
+-		_assert(flags&bucketLeafFlag != 0, "unexpected bucket header flag: %x", flags)
++		if !bytes.Equal([]byte(name), k) {
++			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
++		}
++		if flags&bucketLeafFlag == 0 {
++			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
++		}
+ 		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ 	}
+ 
+@@ -528,7 +543,9 @@ func (b *Bucket) spill() error {
+ 	b.rootNode = b.rootNode.root()
+ 
+ 	// Update the root node for this bucket.
+-	_assert(b.rootNode.pgid < b.tx.meta.pgid, "pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)
++	if b.rootNode.pgid >= b.tx.meta.pgid {
++		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
++	}
+ 	b.root = b.rootNode.pgid
+ 
+ 	return nil
+@@ -659,7 +676,9 @@ func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ 	// Inline buckets have a fake page embedded in their value so treat them
+ 	// differently. We'll return the rootNode (if available) or the fake page.
+ 	if b.root == 0 {
+-		_assert(id == 0, "inline bucket non-zero page access(2): %d != 0", id)
++		if id != 0 {
++			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
++		}
+ 		if b.rootNode != nil {
+ 			return nil, b.rootNode
+ 		}
+diff --git a/vendor/src/github.com/boltdb/bolt/cursor.go b/vendor/src/github.com/boltdb/bolt/cursor.go
+index 3bfc2f1..006c548 100644
+--- a/vendor/src/github.com/boltdb/bolt/cursor.go
++++ b/vendor/src/github.com/boltdb/bolt/cursor.go
+@@ -2,6 +2,7 @@ package bolt
+ 
+ import (
+ 	"bytes"
++	"fmt"
+ 	"sort"
+ )
+ 
+@@ -9,6 +10,8 @@ import (
+ // Cursors see nested buckets with value == nil.
+ // Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+ //
++// Keys and values returned from the cursor are only valid for the life of the transaction.
++//
+ // Changing data while traversing with a cursor may cause it to be invalidated
+ // and return unexpected keys and/or values. You must reposition your cursor
+ // after mutating data.
+@@ -24,6 +27,7 @@ func (c *Cursor) Bucket() *Bucket {
+ 
+ // First moves the cursor to the first item in the bucket and returns its key and value.
+ // If the bucket is empty then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) First() (key []byte, value []byte) {
+ 	_assert(c.bucket.tx.db != nil, "tx closed")
+ 	c.stack = c.stack[:0]
+@@ -40,6 +44,7 @@ func (c *Cursor) First() (key []byte, value []byte) {
+ 
+ // Last moves the cursor to the last item in the bucket and returns its key and value.
+ // If the bucket is empty then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Last() (key []byte, value []byte) {
+ 	_assert(c.bucket.tx.db != nil, "tx closed")
+ 	c.stack = c.stack[:0]
+@@ -57,6 +62,7 @@ func (c *Cursor) Last() (key []byte, value []byte) {
+ 
+ // Next moves the cursor to the next item in the bucket and returns its key and value.
+ // If the cursor is at the end of the bucket then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Next() (key []byte, value []byte) {
+ 	_assert(c.bucket.tx.db != nil, "tx closed")
+ 	k, v, flags := c.next()
+@@ -68,6 +74,7 @@ func (c *Cursor) Next() (key []byte, value []byte) {
+ 
+ // Prev moves the cursor to the previous item in the bucket and returns its key and value.
+ // If the cursor is at the beginning of the bucket then a nil key and value are returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Prev() (key []byte, value []byte) {
+ 	_assert(c.bucket.tx.db != nil, "tx closed")
+ 
+@@ -99,6 +106,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) {
+ // Seek moves the cursor to a given key and returns it.
+ // If the key does not exist then the next key is used. If no keys
+ // follow, a nil key is returned.
++// The returned key and value are only valid for the life of the transaction.
+ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+ 	k, v, flags := c.seek(seek)
+ 
+@@ -228,8 +236,8 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+ // search recursively performs a binary search against a given page/node until it finds a given key.
+ func (c *Cursor) search(key []byte, pgid pgid) {
+ 	p, n := c.bucket.pageNode(pgid)
+-	if p != nil {
+-		_assert((p.flags&(branchPageFlag|leafPageFlag)) != 0, "invalid page type: %d: %x", p.id, p.flags)
++	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
++		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
+ 	}
+ 	e := elemRef{page: p, node: n}
+ 	c.stack = append(c.stack, e)
+diff --git a/vendor/src/github.com/boltdb/bolt/db.go b/vendor/src/github.com/boltdb/bolt/db.go
+index 6c45736..d39c4aa 100644
+--- a/vendor/src/github.com/boltdb/bolt/db.go
++++ b/vendor/src/github.com/boltdb/bolt/db.go
+@@ -12,9 +12,6 @@ import (
+ 	"unsafe"
+ )
+ 
+-// The smallest size that the mmap can be.
+-const minMmapSize = 1 << 22 // 4MB
+-
+ // The largest step that can be taken when remapping the mmap.
+ const maxMmapStep = 1 << 30 // 1GB
+ 
+@@ -30,6 +27,12 @@ const magic uint32 = 0xED0CDAED
+ // must be synchronized using the msync(2) syscall.
+ const IgnoreNoSync = runtime.GOOS == "openbsd"
+ 
++// Default values if not set in a DB instance.
++const (
++	DefaultMaxBatchSize  int = 1000
++	DefaultMaxBatchDelay     = 10 * time.Millisecond
++)
++
+ // DB represents a collection of buckets persisted to a file on disk.
+ // All data access is performed through transactions which can be obtained through the DB.
+ // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
+@@ -52,9 +55,33 @@ type DB struct {
+ 	// THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ 	NoSync bool
+ 
++	// When true, skips the truncate call when growing the database.
++	// Setting this to true is only safe on non-ext3/ext4 systems.
++	// Skipping truncation avoids preallocation of hard drive space and
++	// bypasses a truncate() and fsync() syscall on remapping.
++	//
++	// https://github.com/boltdb/bolt/issues/284
++	NoGrowSync bool
++
++	// MaxBatchSize is the maximum size of a batch. Default value is
++	// copied from DefaultMaxBatchSize in Open.
++	//
++	// If <=0, disables batching.
++	//
++	// Do not change concurrently with calls to Batch.
++	MaxBatchSize int
++
++	// MaxBatchDelay is the maximum delay before a batch starts.
++	// Default value is copied from DefaultMaxBatchDelay in Open.
++	//
++	// If <=0, effectively disables batching.
++	//
++	// Do not change concurrently with calls to Batch.
++	MaxBatchDelay time.Duration
++
+ 	path     string
+ 	file     *os.File
+-	dataref  []byte
++	dataref  []byte // mmap'ed readonly, write throws SEGV
+ 	data     *[maxMapSize]byte
+ 	datasz   int
+ 	meta0    *meta
+@@ -66,6 +93,9 @@ type DB struct {
+ 	freelist *freelist
+ 	stats    Stats
+ 
++	batchMu sync.Mutex
++	batch   *batch
++
+ 	rwlock   sync.Mutex   // Allows only one writer at a time.
+ 	metalock sync.Mutex   // Protects meta page access.
+ 	mmaplock sync.RWMutex // Protects mmap access during remapping.
+@@ -74,6 +104,10 @@ type DB struct {
+ 	ops struct {
+ 		writeAt func(b []byte, off int64) (n int, err error)
+ 	}
++
++	// Read only mode.
++	// When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
++	readOnly bool
+ }
+ 
+ // Path returns the path to currently open database file.
+@@ -101,20 +135,34 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
+ 	if options == nil {
+ 		options = DefaultOptions
+ 	}
++	db.NoGrowSync = options.NoGrowSync
++
++	// Set default values for later DB operations.
++	db.MaxBatchSize = DefaultMaxBatchSize
++	db.MaxBatchDelay = DefaultMaxBatchDelay
++
++	flag := os.O_RDWR
++	if options.ReadOnly {
++		flag = os.O_RDONLY
++		db.readOnly = true
++	}
+ 
+ 	// Open data file and separate sync handler for metadata writes.
+ 	db.path = path
+-
+ 	var err error
+-	if db.file, err = os.OpenFile(db.path, os.O_RDWR|os.O_CREATE, mode); err != nil {
++	if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil {
+ 		_ = db.close()
+ 		return nil, err
+ 	}
+ 
+-	// Lock file so that other processes using Bolt cannot use the database
+-	// at the same time. This would cause corruption since the two processes
+-	// would write meta pages and free pages separately.
+-	if err := flock(db.file, options.Timeout); err != nil {
++	// Lock file so that other processes using Bolt in read-write mode cannot
++	// use the database at the same time. This would cause corruption since
++	// the two processes would write meta pages and free pages separately.
++	// The database file is locked exclusively (only one process can grab the lock)
++	// if !options.ReadOnly.
++	// The database file is locked using the shared lock (more than one process may
++	// hold a lock at the same time) otherwise (options.ReadOnly is set).
++	if err := flock(db.file, !db.readOnly, options.Timeout); err != nil {
+ 		_ = db.close()
+ 		return nil, err
+ 	}
+@@ -162,16 +210,6 @@ func (db *DB) mmap(minsz int) error {
+ 	db.mmaplock.Lock()
+ 	defer db.mmaplock.Unlock()
+ 
+-	// Dereference all mmap references before unmapping.
+-	if db.rwtx != nil {
+-		db.rwtx.root.dereference()
+-	}
+-
+-	// Unmap existing data before continuing.
+-	if err := db.munmap(); err != nil {
+-		return err
+-	}
+-
+ 	info, err := db.file.Stat()
+ 	if err != nil {
+ 		return fmt.Errorf("mmap stat error: %s", err)
+@@ -184,7 +222,20 @@ func (db *DB) mmap(minsz int) error {
+ 	if size < minsz {
+ 		size = minsz
+ 	}
+-	size = db.mmapSize(size)
++	size, err = db.mmapSize(size)
++	if err != nil {
++		return err
++	}
++
++	// Dereference all mmap references before unmapping.
++	if db.rwtx != nil {
++		db.rwtx.root.dereference()
++	}
++
++	// Unmap existing data before continuing.
++	if err := db.munmap(); err != nil {
++		return err
++	}
+ 
+ 	// Memory-map the data file as a byte slice.
+ 	if err := mmap(db, size); err != nil {
+@@ -215,22 +266,40 @@ func (db *DB) munmap() error {
+ }
+ 
+ // mmapSize determines the appropriate size for the mmap given the current size
+-// of the database. The minimum size is 4MB and doubles until it reaches 1GB.
+-func (db *DB) mmapSize(size int) int {
+-	if size <= minMmapSize {
+-		return minMmapSize
+-	} else if size < maxMmapStep {
+-		size *= 2
+-	} else {
+-		size += maxMmapStep
++// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
++// Returns an error if the new mmap size is greater than the max allowed.
++func (db *DB) mmapSize(size int) (int, error) {
++	// Double the size from 32KB until 1GB.
++	for i := uint(15); i <= 30; i++ {
++		if size <= 1<<i {
++			return 1 << i, nil
++		}
++	}
++
++	// Verify the requested size is not above the maximum allowed.
++	if size > maxMapSize {
++		return 0, fmt.Errorf("mmap too large")
++	}
++
++	// If larger than 1GB then grow by 1GB at a time.
++	sz := int64(size)
++	if remainder := sz % int64(maxMmapStep); remainder > 0 {
++		sz += int64(maxMmapStep) - remainder
+ 	}
+ 
+ 	// Ensure that the mmap size is a multiple of the page size.
+-	if (size % db.pageSize) != 0 {
+-		size = ((size / db.pageSize) + 1) * db.pageSize
++	// This should always be true since we're incrementing in MBs.
++	pageSize := int64(db.pageSize)
++	if (sz % pageSize) != 0 {
++		sz = ((sz / pageSize) + 1) * pageSize
++	}
++
++	// If we've exceeded the max size then only grow up to the max size.
++	if sz > maxMapSize {
++		sz = maxMapSize
+ 	}
+ 
+-	return size
++	return int(sz), nil
+ }
+ 
+ // init creates a new database file and initializes its meta pages.
+@@ -250,7 +319,6 @@ func (db *DB) init() error {
+ 		m.magic = magic
+ 		m.version = version
+ 		m.pageSize = uint32(db.pageSize)
+-		m.version = version
+ 		m.freelist = 2
+ 		m.root = bucket{root: 3}
+ 		m.pgid = 4
+@@ -283,8 +351,15 @@ func (db *DB) init() error {
+ // Close releases all database resources.
+ // All transactions must be closed before closing the database.
+ func (db *DB) Close() error {
++	db.rwlock.Lock()
++	defer db.rwlock.Unlock()
++
+ 	db.metalock.Lock()
+ 	defer db.metalock.Unlock()
++
++	db.mmaplock.RLock()
++	defer db.mmaplock.RUnlock()
++
+ 	return db.close()
+ }
+ 
+@@ -304,8 +379,11 @@ func (db *DB) close() error {
+ 
+ 	// Close file handles.
+ 	if db.file != nil {
+-		// Unlock the file.
+-		_ = funlock(db.file)
++		// No need to unlock read-only file.
++		if !db.readOnly {
++			// Unlock the file.
++			_ = funlock(db.file)
++		}
+ 
+ 		// Close the file descriptor.
+ 		if err := db.file.Close(); err != nil {
+@@ -323,6 +401,11 @@ func (db *DB) close() error {
+ // will cause the calls to block and be serialized until the current write
+ // transaction finishes.
+ //
++// Transactions should not be dependent on one another. Opening a read
++// transaction and a write transaction in the same goroutine can cause the
++// writer to deadlock because the database periodically needs to re-mmap itself
++// as it grows and it cannot do that while a read transaction is open.
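++//
++// A sketch of the pattern to avoid (illustrative only):
++//
++//	db.View(func(tx *Tx) error {
++//		return db.Update(func(tx2 *Tx) error { ... }) // may deadlock
++//	})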
++//
+ // IMPORTANT: You must close read-only transactions after you are finished or
+ // else the database will not reclaim old pages.
+ func (db *DB) Begin(writable bool) (*Tx, error) {
+@@ -371,6 +454,11 @@ func (db *DB) beginTx() (*Tx, error) {
+ }
+ 
+ func (db *DB) beginRWTx() (*Tx, error) {
++	// If the database was opened with Options.ReadOnly, return an error.
++	if db.readOnly {
++		return nil, ErrDatabaseReadOnly
++	}
++
+ 	// Obtain writer lock. This is released by the transaction when it closes.
+ 	// This enforces only one writer transaction at a time.
+ 	db.rwlock.Lock()
+@@ -501,6 +589,12 @@ func (db *DB) View(fn func(*Tx) error) error {
+ 	return nil
+ }
+ 
++// Sync executes fdatasync() against the database file handle.
++//
++// This is not necessary under normal operation, however, if you use NoSync
++// then it allows you to force the database file to sync against the disk.
++func (db *DB) Sync() error { return fdatasync(db) }
++
+ // Stats retrieves ongoing performance stats for the database.
+ // This is only updated when a transaction closes.
+ func (db *DB) Stats() Stats {
+@@ -561,18 +655,30 @@ func (db *DB) allocate(count int) (*page, error) {
+ 	return p, nil
+ }
+ 
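++// IsReadOnly reports whether the database was opened with Options.ReadOnly.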
++func (db *DB) IsReadOnly() bool {
++	return db.readOnly
++}
++
+ // Options represents the options that can be set when opening a database.
+ type Options struct {
+ 	// Timeout is the amount of time to wait to obtain a file lock.
+ 	// When set to zero it will wait indefinitely. This option is only
+ 	// available on Darwin and Linux.
+ 	Timeout time.Duration
++
++	// Sets the DB.NoGrowSync flag before memory mapping the file.
++	NoGrowSync bool
++
++	// Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
++	// grab a shared lock (UNIX).
++	ReadOnly bool
+ }
+ 
+ // DefaultOptions represent the options used if nil options are passed into Open().
+ // No timeout is used which will cause Bolt to wait indefinitely for a lock.
+ var DefaultOptions = &Options{
+-	Timeout: 0,
++	Timeout:    0,
++	NoGrowSync: false,
+ }
+ 
+ // Stats represents statistics about the database.
+@@ -647,9 +753,11 @@ func (m *meta) copy(dest *meta) {
+ 
+ // write writes the meta onto a page.
+ func (m *meta) write(p *page) {
+-
+-	_assert(m.root.root < m.pgid, "root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)
+-	_assert(m.freelist < m.pgid, "freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)
++	if m.root.root >= m.pgid {
++		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
++	} else if m.freelist >= m.pgid {
++		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
++	}
+ 
+ 	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ 	p.id = pgid(m.txid % 2)
+@@ -675,13 +783,8 @@ func _assert(condition bool, msg string, v ...interface{}) {
+ 	}
+ }
+ 
+-func warn(v ...interface{}) {
+-	fmt.Fprintln(os.Stderr, v...)
+-}
+-
+-func warnf(msg string, v ...interface{}) {
+-	fmt.Fprintf(os.Stderr, msg+"\n", v...)
+-}
++func warn(v ...interface{})              { fmt.Fprintln(os.Stderr, v...) }
++func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) }
+ 
+ func printstack() {
+ 	stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n")
+diff --git a/vendor/src/github.com/boltdb/bolt/errors.go b/vendor/src/github.com/boltdb/bolt/errors.go
+index aa504f1..6883786 100644
+--- a/vendor/src/github.com/boltdb/bolt/errors.go
++++ b/vendor/src/github.com/boltdb/bolt/errors.go
+@@ -36,6 +36,10 @@ var (
+ 	// ErrTxClosed is returned when committing or rolling back a transaction
+ 	// that has already been committed or rolled back.
+ 	ErrTxClosed = errors.New("tx closed")
++
++	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
++	// read-only database.
++	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+ )
+ 
+ // These errors can occur when putting or deleting a value or a bucket.
+diff --git a/vendor/src/github.com/boltdb/bolt/freelist.go b/vendor/src/github.com/boltdb/bolt/freelist.go
+index 150e3e6..0161948 100644
+--- a/vendor/src/github.com/boltdb/bolt/freelist.go
++++ b/vendor/src/github.com/boltdb/bolt/freelist.go
+@@ -1,6 +1,7 @@
+ package bolt
+ 
+ import (
++	"fmt"
+ 	"sort"
+ 	"unsafe"
+ )
+@@ -47,15 +48,14 @@ func (f *freelist) pending_count() int {
+ 
+ // all returns a list of all free ids and all pending ids in one sorted list.
+ func (f *freelist) all() []pgid {
+-	ids := make([]pgid, len(f.ids))
+-	copy(ids, f.ids)
++	m := make(pgids, 0)
+ 
+ 	for _, list := range f.pending {
+-		ids = append(ids, list...)
++		m = append(m, list...)
+ 	}
+ 
+-	sort.Sort(pgids(ids))
+-	return ids
++	sort.Sort(m)
++	return pgids(f.ids).merge(m)
+ }
+ 
+ // allocate returns the starting page id of a contiguous list of pages of a given size.
+@@ -67,7 +67,9 @@ func (f *freelist) allocate(n int) pgid {
+ 
+ 	var initial, previd pgid
+ 	for i, id := range f.ids {
+-		_assert(id > 1, "invalid page allocation: %d", id)
++		if id <= 1 {
++			panic(fmt.Sprintf("invalid page allocation: %d", id))
++		}
+ 
+ 		// Reset initial page if this is not contiguous.
+ 		if previd == 0 || id-previd != 1 {
+@@ -103,13 +105,17 @@ func (f *freelist) allocate(n int) pgid {
+ // free releases a page and its overflow for a given transaction id.
+ // If the page is already free then a panic will occur.
+ func (f *freelist) free(txid txid, p *page) {
+-	_assert(p.id > 1, "cannot free page 0 or 1: %d", p.id)
++	if p.id <= 1 {
++		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
++	}
+ 
+ 	// Free page and all its overflow pages.
+ 	var ids = f.pending[txid]
+ 	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+ 		// Verify that page is not already free.
+-		_assert(!f.cache[id], "page %d already freed", id)
++		if f.cache[id] {
++			panic(fmt.Sprintf("page %d already freed", id))
++		}
+ 
+ 		// Add to the freelist and cache.
+ 		ids = append(ids, id)
+@@ -120,15 +126,17 @@ func (f *freelist) free(txid txid, p *page) {
+ 
+ // release moves all page ids for a transaction id (or older) to the freelist.
+ func (f *freelist) release(txid txid) {
++	m := make(pgids, 0)
+ 	for tid, ids := range f.pending {
+ 		if tid <= txid {
+ 			// Move transaction's pending pages to the available freelist.
+ 			// Don't remove from the cache since the page is still free.
+-			f.ids = append(f.ids, ids...)
++			m = append(m, ids...)
+ 			delete(f.pending, tid)
+ 		}
+ 	}
+-	sort.Sort(pgids(f.ids))
++	sort.Sort(m)
++	f.ids = pgids(f.ids).merge(m)
+ }
+ 
+ // rollback removes the pages from a given pending tx.
+diff --git a/vendor/src/github.com/boltdb/bolt/node.go b/vendor/src/github.com/boltdb/bolt/node.go
+index c204c39..c9fb21c 100644
+--- a/vendor/src/github.com/boltdb/bolt/node.go
++++ b/vendor/src/github.com/boltdb/bolt/node.go
+@@ -2,6 +2,7 @@ package bolt
+ 
+ import (
+ 	"bytes"
++	"fmt"
+ 	"sort"
+ 	"unsafe"
+ )
+@@ -70,7 +71,9 @@ func (n *node) pageElementSize() int {
+ 
+ // childAt returns the child node at a given index.
+ func (n *node) childAt(index int) *node {
+-	_assert(!n.isLeaf, "invalid childAt(%d) on a leaf node", index)
++	if n.isLeaf {
++		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
++	}
+ 	return n.bucket.node(n.inodes[index].pgid, n)
+ }
+ 
+@@ -111,9 +114,13 @@ func (n *node) prevSibling() *node {
+ 
+ // put inserts a key/value.
+ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
+-	_assert(pgid < n.bucket.tx.meta.pgid, "pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)
+-	_assert(len(oldKey) > 0, "put: zero-length old key")
+-	_assert(len(newKey) > 0, "put: zero-length new key")
++	if pgid >= n.bucket.tx.meta.pgid {
++		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
++	} else if len(oldKey) <= 0 {
++		panic("put: zero-length old key")
++	} else if len(newKey) <= 0 {
++		panic("put: zero-length new key")
++	}
+ 
+ 	// Find insertion index.
+ 	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+@@ -189,7 +196,9 @@ func (n *node) write(p *page) {
+ 		p.flags |= branchPageFlag
+ 	}
+ 
+-	_assert(len(n.inodes) < 0xFFFF, "inode overflow: %d (pgid=%d)", len(n.inodes), p.id)
++	if len(n.inodes) >= 0xFFFF {
++		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
++	}
+ 	p.count = uint16(len(n.inodes))
+ 
+ 	// Loop over each item and write it to the page.
+@@ -212,11 +221,20 @@ func (n *node) write(p *page) {
+ 			_assert(elem.pgid != p.id, "write: circular dependency occurred")
+ 		}
+ 
++		// If the length of key+value is larger than the max allocation size
++		// then we need to reallocate the byte array pointer.
++		//
++		// See: https://github.com/boltdb/bolt/pull/335
++		klen, vlen := len(item.key), len(item.value)
++		if len(b) < klen+vlen {
++			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
++		}
++
+ 		// Write data for the element to the end of the page.
+ 		copy(b[0:], item.key)
+-		b = b[len(item.key):]
++		b = b[klen:]
+ 		copy(b[0:], item.value)
+-		b = b[len(item.value):]
++		b = b[vlen:]
+ 	}
+ 
+ 	// DEBUG ONLY: n.dump()
+@@ -348,7 +366,9 @@ func (n *node) spill() error {
+ 		}
+ 
+ 		// Write the node.
+-		_assert(p.id < tx.meta.pgid, "pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)
++		if p.id >= tx.meta.pgid {
++			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
++		}
+ 		node.pgid = p.id
+ 		node.write(p)
+ 		node.spilled = true
+diff --git a/vendor/src/github.com/boltdb/bolt/page.go b/vendor/src/github.com/boltdb/bolt/page.go
+index b3dc473..818aa1b 100644
+--- a/vendor/src/github.com/boltdb/bolt/page.go
++++ b/vendor/src/github.com/boltdb/bolt/page.go
+@@ -3,12 +3,12 @@ package bolt
+ import (
+ 	"fmt"
+ 	"os"
++	"sort"
+ 	"unsafe"
+ )
+ 
+ const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
+ 
+-const maxAllocSize = 0xFFFFFFF
+ const minKeysPerPage = 2
+ 
+ const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
+@@ -97,7 +97,7 @@ type branchPageElement struct {
+ // key returns a byte slice of the node key.
+ func (n *branchPageElement) key() []byte {
+ 	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+-	return buf[n.pos : n.pos+n.ksize]
++	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+ }
+ 
+ // leafPageElement represents a node on a leaf page.
+@@ -111,13 +111,13 @@ type leafPageElement struct {
+ // key returns a byte slice of the node key.
+ func (n *leafPageElement) key() []byte {
+ 	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+-	return buf[n.pos : n.pos+n.ksize]
++	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
+ }
+ 
+ // value returns a byte slice of the node value.
+ func (n *leafPageElement) value() []byte {
+ 	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
+-	return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize]
++	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize]
+ }
+ 
+ // PageInfo represents human readable information about a page.
+@@ -133,3 +133,40 @@ type pgids []pgid
+ func (s pgids) Len() int           { return len(s) }
+ func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+ func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
++
++// merge returns the sorted union of a and b.
++func (a pgids) merge(b pgids) pgids {
++	// Return the opposite slice if one is nil.
++	if len(a) == 0 {
++		return b
++	} else if len(b) == 0 {
++		return a
++	}
++
++	// Create a list to hold all elements from both lists.
++	merged := make(pgids, 0, len(a)+len(b))
++
++	// Assign lead to the slice with a lower starting value, follow to the higher value.
++	lead, follow := a, b
++	if b[0] < a[0] {
++		lead, follow = b, a
++	}
++
++	// Continue while there are elements in the lead.
++	for len(lead) > 0 {
++		// Merge largest prefix of lead that is ahead of follow[0].
++		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
++		merged = append(merged, lead[:n]...)
++		if n >= len(lead) {
++			break
++		}
++
++		// Swap lead and follow.
++		lead, follow = follow, lead[n:]
++	}
++
++	// Append what's left in follow.
++	merged = append(merged, follow...)
++
++	return merged
++}
+diff --git a/vendor/src/github.com/boltdb/bolt/tx.go b/vendor/src/github.com/boltdb/bolt/tx.go
+index c041d73..fe6c287 100644
+--- a/vendor/src/github.com/boltdb/bolt/tx.go
++++ b/vendor/src/github.com/boltdb/bolt/tx.go
+@@ -87,18 +87,21 @@ func (tx *Tx) Stats() TxStats {
+ 
+ // Bucket retrieves a bucket by name.
+ // Returns nil if the bucket does not exist.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (tx *Tx) Bucket(name []byte) *Bucket {
+ 	return tx.root.Bucket(name)
+ }
+ 
+ // CreateBucket creates a new bucket.
+ // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+ 	return tx.root.CreateBucket(name)
+ }
+ 
+ // CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+ // Returns an error if the bucket name is blank, or if the bucket name is too long.
++// The bucket instance is only valid for the lifetime of the transaction.
+ func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+ 	return tx.root.CreateBucketIfNotExists(name)
+ }
+@@ -127,7 +130,8 @@ func (tx *Tx) OnCommit(fn func()) {
+ }
+ 
+ // Commit writes all changes to disk and updates the meta page.
+-// Returns an error if a disk write error occurs.
++// Returns an error if a disk write error occurs, or if Commit is
++// called on a read-only transaction.
+ func (tx *Tx) Commit() error {
+ 	_assert(!tx.managed, "managed tx commit not allowed")
+ 	if tx.db == nil {
+@@ -203,7 +207,8 @@ func (tx *Tx) Commit() error {
+ 	return nil
+ }
+ 
+-// Rollback closes the transaction and ignores all previous updates.
++// Rollback closes the transaction and ignores all previous updates. Read-only
++// transactions must be rolled back and not committed.
+ func (tx *Tx) Rollback() error {
+ 	_assert(!tx.managed, "managed tx rollback not allowed")
+ 	if tx.db == nil {
+@@ -234,7 +239,8 @@ func (tx *Tx) close() {
+ 		var freelistPendingN = tx.db.freelist.pending_count()
+ 		var freelistAlloc = tx.db.freelist.size()
+ 
+-		// Remove writer lock.
++		// Remove transaction ref & writer lock.
++		tx.db.rwtx = nil
+ 		tx.db.rwlock.Unlock()
+ 
+ 		// Merge statistics.
+@@ -248,41 +254,51 @@ func (tx *Tx) close() {
+ 	} else {
+ 		tx.db.removeTx(tx)
+ 	}
++
++	// Clear all references.
+ 	tx.db = nil
++	tx.meta = nil
++	tx.root = Bucket{tx: tx}
++	tx.pages = nil
+ }
+ 
+ // Copy writes the entire database to a writer.
+-// A reader transaction is maintained during the copy so it is safe to continue
+-// using the database while a copy is in progress.
+-// Copy will write exactly tx.Size() bytes into the writer.
++// This function exists for backwards compatibility. Use WriteTo() instead.
+ func (tx *Tx) Copy(w io.Writer) error {
+-	var f *os.File
+-	var err error
++	_, err := tx.WriteTo(w)
++	return err
++}
+ 
++// WriteTo writes the entire database to a writer.
++// If err == nil then exactly tx.Size() bytes will be written into the writer.
++func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+ 	// Attempt to open reader directly.
++	var f *os.File
+ 	if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil {
+ 		// Fallback to a regular open if that doesn't work.
+ 		if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil {
+-			return err
++			return 0, err
+ 		}
+ 	}
+ 
+ 	// Copy the meta pages.
+ 	tx.db.metalock.Lock()
+-	_, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
++	n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
+ 	tx.db.metalock.Unlock()
+ 	if err != nil {
+ 		_ = f.Close()
+-		return fmt.Errorf("meta copy: %s", err)
++		return n, fmt.Errorf("meta copy: %s", err)
+ 	}
+ 
+ 	// Copy data pages.
+-	if _, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)); err != nil {
++	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
++	n += wn
++	if err != nil {
+ 		_ = f.Close()
+-		return err
++		return n, err
+ 	}
+ 
+-	return f.Close()
++	return n, f.Close()
+ }
+ 
+ // CopyFile copies the entire database to file at the given path.
+@@ -416,15 +432,39 @@ func (tx *Tx) write() error {
+ 	// Write pages to disk in order.
+ 	for _, p := range pages {
+ 		size := (int(p.overflow) + 1) * tx.db.pageSize
+-		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:size]
+ 		offset := int64(p.id) * int64(tx.db.pageSize)
+-		if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+-			return err
+-		}
+ 
+-		// Update statistics.
+-		tx.stats.Write++
++		// Write out page in "max allocation" sized chunks.
++		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
++		for {
++			// Limit our write to our max allocation size.
++			sz := size
++			if sz > maxAllocSize-1 {
++				sz = maxAllocSize - 1
++			}
++
++			// Write chunk to disk.
++			buf := ptr[:sz]
++			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
++				return err
++			}
++
++			// Update statistics.
++			tx.stats.Write++
++
++			// Exit inner for loop if we've written all the chunks.
++			size -= sz
++			if size == 0 {
++				break
++			}
++
++			// Otherwise move offset forward and move pointer to next chunk.
++			offset += int64(sz)
++			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
++		}
+ 	}
++
++	// Ignore file sync if flag is set on DB.
+ 	if !tx.db.NoSync || IgnoreNoSync {
+ 		if err := fdatasync(tx.db); err != nil {
+ 			return err
+-- 
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch b/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch
new file mode 100644
index 0000000..d37d7a0
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/disable_sha1sum_startup.patch
@@ -0,0 +1,56 @@
+From 12fd6388a033ab5ec9b3a7b144c4976031e6aa52 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Fri, 20 Nov 2015 10:02:09 +0000
+Subject: [PATCH] disable sha1sum startup
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+---
+ utils/utils.go | 18 +-----------------
+ 1 file changed, 1 insertion(+), 17 deletions(-)
+
+diff --git a/utils/utils.go b/utils/utils.go
+index a17ab9d..3fc514a 100644
+--- a/utils/utils.go
++++ b/utils/utils.go
+@@ -2,8 +2,6 @@ package utils
+ 
+ import (
+ 	"bufio"
+-	"crypto/sha1"
+-	"encoding/hex"
+ 	"fmt"
+ 	"io"
+ 	"io/ioutil"
+@@ -42,20 +40,6 @@ func SelfPath() string {
+ 	return path
+ }
+ 
+-func dockerInitSha1(target string) string {
+-	f, err := os.Open(target)
+-	if err != nil {
+-		return ""
+-	}
+-	defer f.Close()
+-	h := sha1.New()
+-	_, err = io.Copy(h, f)
+-	if err != nil {
+-		return ""
+-	}
+-	return hex.EncodeToString(h.Sum(nil))
+-}
+-
+ func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
+ 	if target == "" {
+ 		return false
+@@ -77,7 +61,7 @@ func isValidDockerInitPath(target string, selfPath string) bool { // target and
+ 		}
+ 		return os.SameFile(targetFileInfo, selfPathFileInfo)
+ 	}
+-	return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1
++	return true
+ }
+ 
+ // DockerInitPath figures out the path of our dockerinit (which may be SelfPath())
+-- 
+1.9.1
+
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
new file mode 100644
index 0000000..9c01c75
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.init
@@ -0,0 +1,126 @@
+#!/bin/sh
+#
+#       /etc/rc.d/init.d/docker
+#
+#       Daemon for docker.com
+#
+# chkconfig:   2345 95 95
+# description: Daemon for docker.com
+
+### BEGIN INIT INFO
+# Provides:       docker
+# Required-Start: $network cgconfig
+# Required-Stop:
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop:  0 1 6
+# Short-Description: start and stop docker
+# Description: Daemon for docker.com
+### END INIT INFO
+
+# Source function library.
+. /etc/init.d/functions
+
+prog="docker"
+unshare=/usr/bin/unshare
+exec="/usr/bin/$prog"
+pidfile="/var/run/$prog.pid"
+lockfile="/var/lock/subsys/$prog"
+logfile="/var/log/$prog"
+
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+start() {
+    [ -x $exec ] || exit 5
+
+    check_for_cleanup
+
+    if ! [ -f $pidfile ]; then
+        printf "Starting $prog:\t"
+        echo "\n$(date)\n" >> $logfile
+        "$unshare" -m -- $exec -d $other_args >> $logfile 2>&1 &
+        pid=$!
+        touch $lockfile
+        # wait up to 10 seconds for the pidfile to exist.  see
+        # https://github.com/docker/docker/issues/5359
+        tries=0
+        while [ ! -f $pidfile -a $tries -lt 10 ]; do
+            sleep 1
+            tries=$((tries + 1))
+        done
+        success
+        echo
+    else
+        failure
+        echo
+        printf "$pidfile still exists...\n"
+        exit 7
+    fi
+}
+
+stop() {
+    echo -n $"Stopping $prog: "
+    killproc $prog
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && rm -f $lockfile
+    return $retval
+}
+
+restart() {
+    stop
+    start
+}
+
+reload() {
+    restart
+}
+
+force_reload() {
+    restart
+}
+
+rh_status() {
+    status -p $pidfile $prog
+}
+
+rh_status_q() {
+    rh_status >/dev/null 2>&1
+}
+
+
+check_for_cleanup() {
+    if [ -f ${pidfile} ]; then
+        /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile}
+    fi
+}
+
+case "$1" in
+    start)
+        $1
+        ;;
+    stop)
+        $1
+        ;;
+    restart)
+        $1
+        ;;
+    reload)
+        $1
+        ;;
+    force-reload)
+        force_reload
+        ;;
+    status)
+        rh_status
+        ;;
+    condrestart|try-restart)
+        restart
+        ;;
+    *)
+        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+        exit 2
+esac
+
+exit $?
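The init script reads optional settings from /etc/sysconfig/docker and forwards $other_args to the daemon, so site-specific flags need not be patched into the script itself. A minimal sketch (the storage-driver flag is only an illustrative example):

    # /etc/sysconfig/docker  (sourced by the script above)
    other_args="--storage-driver=overlay"

    # then manage the daemon through the script
    /etc/init.d/docker start
    /etc/init.d/docker status
    /etc/init.d/docker stop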
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
new file mode 100644
index 0000000..6801031
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/docker.service
@@ -0,0 +1,15 @@
+[Unit]
+Description=Docker Application Container Engine
+Documentation=http://docs.docker.com
+After=network.target docker.socket
+Requires=docker.socket
+
+[Service]
+ExecStart=/usr/bin/docker -d -H fd:// --registry-mirror=http://localhost:5000 --insecure-registry=http://localhost:5000
+MountFlags=slave
+LimitNOFILE=1048576
+LimitNPROC=1048576
+LimitCORE=infinity
+
+[Install]
+WantedBy=multi-user.target
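The packaged unit hard-codes a registry mirror and an insecure registry on localhost:5000; on a systemd image those flags are easier to override with a drop-in than by editing the installed file. A sketch, assuming systemctl is available on the target:

    mkdir -p /etc/systemd/system/docker.service.d
    cat > /etc/systemd/system/docker.service.d/override.conf << 'EOF'
    [Service]
    ExecStart=
    ExecStart=/usr/bin/docker -d -H fd://
    EOF
    systemctl daemon-reload
    systemctl restart docker.service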
diff --git a/import-layers/meta-virtualization/recipes-containers/docker/files/hi.Dockerfile b/import-layers/meta-virtualization/recipes-containers/docker/files/hi.Dockerfile
new file mode 100644
index 0000000..9af6805
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/docker/files/hi.Dockerfile
@@ -0,0 +1,7 @@
+FROM debian
+
+MAINTAINER amy.fong@windriver.com
+
+RUN apt-get update && apt-get install -y figlet
+
+ENTRYPOINT [ "/usr/bin/figlet", "hi" ]
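hi.Dockerfile is a small smoke-test image for the daemon. Assuming docker is running on the target, it can be built and exercised roughly like this:

    # copy the file into a build context of its own
    mkdir /tmp/hi-test && cp hi.Dockerfile /tmp/hi-test/Dockerfile
    docker build -t hi /tmp/hi-test
    docker run --rm hi        # prints "hi" rendered by figlet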
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch
new file mode 100644
index 0000000..5adb730
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/Generate-lxc-restore-net-properly.patch
@@ -0,0 +1,121 @@
+From e08f3573b3561f1f0490624f7ca95b7ccd8157cb Mon Sep 17 00:00:00 2001
+Message-Id: <e08f3573b3561f1f0490624f7ca95b7ccd8157cb.1435177418.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Wed, 24 Jun 2015 16:16:38 -0400
+Subject: [PATCH 1/1] Generate lxc-restore-net properly
+
+It's a script that should be run through the configure
+mechanism the same as the others.  We simply rename it
+to have a .in extension and add it to configure.ac .
+
+Also, by generating the script from a .in file, it gets
+placed into the build directory.  This plays nice with
+build systems that keep the src separate from the build
+directory.  Without this change, the install step won't
+find the lxc-restore-net script as it still just resides
+in the src directory and not in the build directory.
+
+Upstream-Status: Not applicable.  This script has already
+been rearchitected out of existence by
+cba98d127bf490b018a016b792ae05fd2d29c5ee:
+"c/r: use criu option instead of lxc-restore-net
+
+As of criu 1.5, the --veth-pair argument supports an additional parameter that
+is the bridge name to attach to. This enables us to get rid of the goofy
+action-script hack that passed bridge names as environment variables.
+
+This patch is on top of the systemd/lxcfs mount rework patch, as we probably
+want to wait to use 1.5 options until it has been out for a while and is in
+distros.
+
+Signed-off-by: Tycho Andersen <tycho.andersen@canonical.com>
+Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>"
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ configure.ac               |  1 +
+ src/lxc/lxc-restore-net    | 26 --------------------------
+ src/lxc/lxc-restore-net.in | 26 ++++++++++++++++++++++++++
+ 3 files changed, 27 insertions(+), 26 deletions(-)
+ delete mode 100755 src/lxc/lxc-restore-net
+ create mode 100755 src/lxc/lxc-restore-net.in
+
+diff --git a/configure.ac b/configure.ac
+index 574b2cd..4972803 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -768,6 +768,7 @@ AC_CONFIG_FILES([
+ 	src/lxc/legacy/lxc-ls
+ 	src/lxc/lxc.functions
+ 	src/lxc/version.h
++	src/lxc/lxc-restore-net
+ 	src/python-lxc/Makefile
+ 	src/python-lxc/setup.py
+ 
+diff --git a/src/lxc/lxc-restore-net b/src/lxc/lxc-restore-net
+deleted file mode 100755
+index 6ae3c19..0000000
+--- a/src/lxc/lxc-restore-net
++++ /dev/null
+@@ -1,26 +0,0 @@
+-#!/bin/sh
+-
+-set -e
+-
+-i=0
+-while true; do
+-	eval "bridge=\$LXC_CRIU_BRIDGE$i"
+-	eval "veth=\$LXC_CRIU_VETH$i"
+-
+-	if [ -z "$bridge" ] || [ -z "$veth" ]; then
+-		exit 0
+-	fi
+-
+-	if [ "$CRTOOLS_SCRIPT_ACTION" = "network-lock" ]; then
+-		brctl delif $bridge $veth
+-	fi
+-
+-	if [ "$CRTOOLS_SCRIPT_ACTION" = "network-unlock" ]; then
+-		brctl addif $bridge $veth
+-		ip link set dev $veth up
+-	fi
+-
+-	i=$((i+1))
+-done
+-
+-exit 1
+diff --git a/src/lxc/lxc-restore-net.in b/src/lxc/lxc-restore-net.in
+new file mode 100755
+index 0000000..6ae3c19
+--- /dev/null
++++ b/src/lxc/lxc-restore-net.in
+@@ -0,0 +1,26 @@
++#!/bin/sh
++
++set -e
++
++i=0
++while true; do
++	eval "bridge=\$LXC_CRIU_BRIDGE$i"
++	eval "veth=\$LXC_CRIU_VETH$i"
++
++	if [ -z "$bridge" ] || [ -z "$veth" ]; then
++		exit 0
++	fi
++
++	if [ "$CRTOOLS_SCRIPT_ACTION" = "network-lock" ]; then
++		brctl delif $bridge $veth
++	fi
++
++	if [ "$CRTOOLS_SCRIPT_ACTION" = "network-unlock" ]; then
++		brctl addif $bridge $veth
++		ip link set dev $veth up
++	fi
++
++	i=$((i+1))
++done
++
++exit 1
+-- 
+1.8.3.2
+
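For context, lxc-restore-net is a CRIU action script: CRIU exports CRTOOLS_SCRIPT_ACTION, and LXC passes bridge/veth pairs through the numbered LXC_CRIU_BRIDGE*/LXC_CRIU_VETH* variables the script loops over. A hedged illustration of how the generated script gets driven (the values and the install path are made up):

    CRTOOLS_SCRIPT_ACTION=network-unlock \
    LXC_CRIU_BRIDGE0=lxcbr0 LXC_CRIU_VETH0=vethXYZ \
        /usr/bin/lxc-restore-net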
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
new file mode 100644
index 0000000..2b5c853
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/automake-ensure-VPATH-builds-correctly.patch
@@ -0,0 +1,26 @@
+From fe23085d9a40d6d78387d9ce8ddb65785fe8d6e5 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Thu, 2 Oct 2014 18:31:50 -0400
+Subject: [PATCH] automake: ensure VPATH builds correctly
+
+Signed-off-by: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+---
+ src/tests/Makefile.am |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/tests/Makefile.am b/src/tests/Makefile.am
+index d74c10d..6225f78 100644
+--- a/src/tests/Makefile.am
++++ b/src/tests/Makefile.am
+@@ -66,7 +66,7 @@ buildtest-TESTS: $(TESTS)
+ install-ptest:
+ 	install -d $(TEST_DIR)
+ 	install -D ../lxc/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
+-	install -D ../../config/test-driver $(TEST_DIR)/../../config/test-driver
++	install -D $(top_srcdir)/config/test-driver $(TEST_DIR)/../../config/test-driver
+ 	cp Makefile $(TEST_DIR)
+ 	@(for file in $(TESTS); do install $$file $(TEST_DIR);  done;)
+ 	sed -i 's|^Makefile:|_Makefile:|' $(TEST_DIR)/Makefile
+-- 
+1.7.10.4
+
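The change only matters for VPATH (out-of-tree) builds, which is how bitbake drives autotools; reproduced by hand outside bitbake, the same layout looks roughly like:

    # objects and generated Makefiles land in build/, the source tree stays untouched
    mkdir build && cd build
    ../configure
    make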
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/logs-optionally-use-base-filenames-to-report-src-fil.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/logs-optionally-use-base-filenames-to-report-src-fil.patch
new file mode 100644
index 0000000..583b6f1
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/logs-optionally-use-base-filenames-to-report-src-fil.patch
@@ -0,0 +1,70 @@
+From 4729d0f4c4d1dacd150ddfd7061dda875eb94e34 Mon Sep 17 00:00:00 2001
+Message-Id: <4729d0f4c4d1dacd150ddfd7061dda875eb94e34.1443216870.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Fri, 25 Sep 2015 15:08:17 -0400
+Subject: [PATCH 1/1] logs: optionally use base filenames to report src files
+
+Problem:  Logs are nice in that they report the source file,
+routine, and line number where an issue occurs.  But the
+file is printed as the absolute filename.  Users do not
+need to see a long spew of path directory names where the package
+was built.  It just confuses things.
+
+Solution:  Optionally chop off all leading directories so that just
+the source filename, i.e. the basename, is printed.  This is done by
+guarding the code with an #ifdef LXC_LOG_USE_BASENAME check.  That
+define is done via the optional --enable-log-src-basename provided
+at configure time.
+
+Using __BASE_FILE__ instead of __FILE__ did not work.  It
+refers to the file name as presented to the compile
+machinery, and that may still be the absolute pathname to
+the file.
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ configure.ac  | 9 +++++++++
+ src/lxc/log.h | 5 +++++
+ 2 files changed, 14 insertions(+)
+
+diff --git a/configure.ac b/configure.ac
+index deba90b..c1ed67b 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -357,6 +357,15 @@ AM_COND_IF([ENABLE_PYTHON],
+ 	PKG_CHECK_MODULES([PYTHONDEV], [python3 >= 3.2],[],[AC_MSG_ERROR([You must install python3-dev])])
+ 	AC_DEFINE_UNQUOTED([ENABLE_PYTHON], 1, [Python3 is available])])
+ 
++# Enable basenames in the logs for source files
++AC_ARG_ENABLE([log-src-basename],
++	[AC_HELP_STRING([--enable-log-src-basename], [Use the shorter source file basename in the logs [default=no]])],
++	[], [enable_log_src_basename=no])
++
++if test "x$enable_log_src_basename" = "xyes"; then
++	AC_DEFINE([LXC_LOG_USE_BASENAME], 1, [Enabling shorter src filenames in the logs])
++fi
++
+ # Enable dumping stack traces
+ AC_ARG_ENABLE([mutex-debugging],
+ 	[AC_HELP_STRING([--enable-mutex-debugging], [Makes mutexes to report error and provide stack trace [default=no]])],
+diff --git a/src/lxc/log.h b/src/lxc/log.h
+index 76bd4df..4365977 100644
+--- a/src/lxc/log.h
++++ b/src/lxc/log.h
+@@ -74,8 +74,13 @@ struct lxc_log_locinfo {
+ 	int		line;
+ };
+ 
++#ifdef LXC_LOG_USE_BASENAME
++#define LXC_LOG_LOCINFO_INIT						\
++	{ .file = (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__), .func = __func__, .line = __LINE__	}
++#else
+ #define LXC_LOG_LOCINFO_INIT						\
+ 	{ .file = __FILE__, .func = __func__, .line = __LINE__	}
++#endif
+ 
+ /* brief logging event object */
+ struct lxc_log_event {
+-- 
+1.8.3.2
+
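The new switch defaults to off; the lxc recipe below enables it through EXTRA_OECONF, and a manual build would do the same at configure time:

    ./configure --enable-log-src-basename
    make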
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-1.0.0-disable-udhcp-from-busybox-template.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-1.0.0-disable-udhcp-from-busybox-template.patch
new file mode 100644
index 0000000..723be27
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-1.0.0-disable-udhcp-from-busybox-template.patch
@@ -0,0 +1,24 @@
+From d7e07e7acb1cbad33806f49143a2a30b4468c369 Mon Sep 17 00:00:00 2001
+From: Bogdan Purcareata <bogdan.purcareata@freescale.com>
+Date: Mon, 8 Apr 2013 18:30:19 +0300
+Subject: [PATCH] lxc-0.9.0-disable-udhcp-from-busybox-template
+
+---
+ templates/lxc-busybox.in | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/templates/lxc-busybox.in b/templates/lxc-busybox.in
+index cb425ec..bb8c951 100644
+--- a/templates/lxc-busybox.in
++++ b/templates/lxc-busybox.in
+@@ -84,7 +84,6 @@ EOF
+ #!/bin/sh
+ /bin/syslogd
+ /bin/mount -a
+-/bin/udhcpc
+ EOF
+ 
+     # executable
+-- 
+1.7.11.7
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch
new file mode 100644
index 0000000..5f9d771
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch
@@ -0,0 +1,32 @@
+From 4cf207ffd64c6f815e62ecbbf25b5a378e707182 Mon Sep 17 00:00:00 2001
+Message-Id: <4cf207ffd64c6f815e62ecbbf25b5a378e707182.1439319694.git.Jim.Somerville@windriver.com>
+From: Jim Somerville <Jim.Somerville@windriver.com>
+Date: Tue, 11 Aug 2015 14:05:00 -0400
+Subject: [PATCH 1/1] lxc: doc: upgrade to use docbook 3.1 DTD
+
+docbook2man fails to build the man pages in poky
+due to missing the ancient Davenport 3.0 DTD.
+Poky meta has the Oasis 3.1 version so upgrade
+to use that instead.
+
+Signed-off-by: Jim Somerville <Jim.Somerville@windriver.com>
+---
+ configure.ac | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/configure.ac b/configure.ac
+index 4972803..2e67b5e 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -179,7 +179,7 @@ AM_CONDITIONAL([ENABLE_DOCBOOK], [test "x$db2xman" != "x"])
+ AM_CONDITIONAL([USE_DOCBOOK2X], [test "x$db2xman" != "xdocbook2man"])
+ 
+ if test "x$db2xman" = "xdocbook2man"; then
+-	docdtd="\"-//Davenport//DTD DocBook V3.0//EN\""
++	docdtd="\"-//OASIS//DTD DocBook V3.1//EN\""
+ else
+ 	docdtd="\"-//OASIS//DTD DocBook XML\" \"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd\""
+ fi
+-- 
+1.8.3.2
+
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-fix-B-S.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-fix-B-S.patch
new file mode 100644
index 0000000..a776b4f
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/lxc-fix-B-S.patch
@@ -0,0 +1,16 @@
+Index: lxc-2.0.0/config/init/upstart/Makefile.am
+===================================================================
+--- lxc-2.0.0.orig/config/init/upstart/Makefile.am
++++ lxc-2.0.0/config/init/upstart/Makefile.am
+@@ -3,9 +3,9 @@
+ if INIT_SCRIPT_UPSTART
+ install-upstart: lxc.conf lxc-instance.conf lxc-net.conf
+ 	$(MKDIR_P) $(DESTDIR)$(sysconfdir)/init/
+-	$(INSTALL_DATA) lxc.conf $(DESTDIR)$(sysconfdir)/init/
++	$(INSTALL_DATA) $(srcdir)/lxc.conf $(DESTDIR)$(sysconfdir)/init/
+ 	$(INSTALL_DATA) $(srcdir)/lxc-instance.conf $(DESTDIR)$(sysconfdir)/init/
+-	$(INSTALL_DATA) lxc-net.conf $(DESTDIR)$(sysconfdir)/init/
++	$(INSTALL_DATA) $(srcdir)/lxc-net.conf $(DESTDIR)$(sysconfdir)/init/
+ 
+ uninstall-upstart:
+ 	rm -f $(DESTDIR)$(sysconfdir)/init/lxc.conf
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/run-ptest b/import-layers/meta-virtualization/recipes-containers/lxc/files/run-ptest
new file mode 100644
index 0000000..23a6256
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/run-ptest
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+make -C src/tests -k check-TESTS
+
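run-ptest is executed on the target by the standard ptest machinery; assuming ptest-runner is installed and the tests land under the usual /usr/lib/lxc/ptest path, they can be run with:

    ptest-runner lxc
    # or directly
    cd /usr/lib/lxc/ptest && ./run-ptest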
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch b/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
new file mode 100644
index 0000000..e4e034b
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/files/runtest.patch
@@ -0,0 +1,32 @@
+Add install-ptest rule.
+
+Signed-off-by: Mihaela Sendrea <mihaela.sendrea@enea.com>
+Upstream-Status: Pending
+
+diff -uNr a/src/tests/Makefile.am b/src/tests/Makefile.am
+--- a/src/tests/Makefile.am	2014-04-07 16:25:59.246238815 +0300
++++ b/src/tests/Makefile.am	2014-04-10 18:09:43.195772467 +0300
+@@ -54,6 +54,23 @@
+ 
+ endif
+ 
++TESTS = lxc-test-containertests lxc-test-locktests \
++        lxc-test-getkeys lxc-test-lxcpath lxc-test-cgpath lxc-test-console \
++        lxc-test-snapshot lxc-test-concurrent lxc-test-may-control \
++        lxc-test-reboot lxc-test-list lxc-test-attach lxc-test-device-add-remove
++
++buildtest-TESTS: $(TESTS)
++
++install-ptest:
++	install -d $(TEST_DIR)
++	install -D ../lxc/liblxc.so $(TEST_DIR)/../lxc/liblxc.so
++	install -D ../../config/test-driver $(TEST_DIR)/../../config/test-driver
++	cp Makefile $(TEST_DIR)
++	@(for file in $(TESTS); do install $$file $(TEST_DIR);  done;)
++	sed -i 's|^Makefile:|_Makefile:|' $(TEST_DIR)/Makefile
++	sed -i 's|^all-am:|_all-am:|' $(TEST_DIR)/Makefile
++	sed -i -e 's|^\(.*\.log:\) \(.*EXEEXT.*\)|\1|g' $(TEST_DIR)/Makefile
++
+ EXTRA_DIST = \
+ 	cgpath.c \
+ 	clonetest.c \
diff --git a/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
new file mode 100644
index 0000000..34aab38
--- /dev/null
+++ b/import-layers/meta-virtualization/recipes-containers/lxc/lxc_2.0.0.bb
@@ -0,0 +1,168 @@
+DESCRIPTION = "lxc aims to use these new functionnalities to provide an userspace container object"
+SECTION = "console/utils"
+LICENSE = "GPLv2"
+LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c"
+PRIORITY = "optional"
+DEPENDS = "libxml2 libcap"
+RDEPENDS_${PN} = " \
+		rsync \
+		gzip \
+		libcap-bin \
+		bridge-utils \
+		dnsmasq \
+		perl-module-strict \
+		perl-module-getopt-long \
+		perl-module-vars \
+		perl-module-warnings-register \
+		perl-module-exporter \
+		perl-module-constant \
+		perl-module-overload \
+		perl-module-exporter-heavy \
+"
+RDEPENDS_${PN}-ptest += "file make"
+
+SRC_URI = "http://linuxcontainers.org/downloads/${BPN}-${PV}.tar.gz \
+	file://lxc-1.0.0-disable-udhcp-from-busybox-template.patch \
+	file://runtest.patch \
+	file://run-ptest \
+	file://automake-ensure-VPATH-builds-correctly.patch \
+	file://lxc-fix-B-S.patch \
+	file://lxc-doc-upgrade-to-use-docbook-3.1-DTD.patch \
+	file://logs-optionally-use-base-filenames-to-report-src-fil.patch \
+	"
+
+SRC_URI[md5sum] = "04a7245a614cd3296b0ae9ceeeb83fbb"
+SRC_URI[sha256sum] = "5b737e114d8ef1feb193fba936d77a5697a7c8a10199a068cdd90d1bd27c10e4"
+
+S = "${WORKDIR}/${BPN}-${PV}"
+
+# Let's not configure for the host distro.
+#
+PTEST_CONF = "${@base_contains('DISTRO_FEATURES', 'ptest', '--enable-tests', '', d)}"
+EXTRA_OECONF += "--with-distro=${DISTRO} ${PTEST_CONF}"
+
+EXTRA_OECONF += "--with-init-script=\
+${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'sysvinit,', '', d)}\
+${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'systemd', '', d)}"
+
+EXTRA_OECONF += "--enable-log-src-basename"
+
+PACKAGECONFIG ??= "templates \
+    ${@base_contains('DISTRO_FEATURES', 'selinux', 'selinux', '', d)} \
+"
+PACKAGECONFIG[doc] = "--enable-doc --enable-api-docs,--disable-doc --disable-api-docs,,"
+PACKAGECONFIG[rpath] = "--enable-rpath,--disable-rpath,,"
+PACKAGECONFIG[apparmour] = "--enable-apparmor,--disable-apparmor,apparmor,apparmor"
+PACKAGECONFIG[templates] = ",,, ${PN}-templates"
+PACKAGECONFIG[selinux] = "--enable-selinux,--disable-selinux,libselinux,libselinux"
+PACKAGECONFIG[seccomp] = "--enable-seccomp,--disable-seccomp,libseccomp,libseccomp"
+PACKAGECONFIG[python] = "--enable-python,--disable-python,python3,python3-core"
+
+# required by python3 to run setup.py
+export BUILD_SYS
+export HOST_SYS
+export STAGING_INCDIR
+export STAGING_LIBDIR
+
+inherit autotools pkgconfig ptest update-rc.d systemd python3native
+
+SYSTEMD_PACKAGES = "${PN}-setup"
+SYSTEMD_SERVICE_${PN}-setup = "lxc.service"
+SYSTEMD_AUTO_ENABLE_${PN}-setup = "disable"
+
+INITSCRIPT_PACKAGES = "${PN}-setup"
+INITSCRIPT_NAME_${PN}-setup = "lxc"
+INITSCRIPT_PARAMS_${PN}-setup = "${OS_DEFAULT_INITSCRIPT_PARAMS}"
+
+FILES_${PN}-doc = "${mandir} ${infodir}"
+# For LXC the docdir only contains example configuration files and should be included in the lxc package
+FILES_${PN} += "${docdir}"
+FILES_${PN} += "${libdir}/python3*"
+FILES_${PN}-dbg += "${libexecdir}/lxc/.debug"
+PACKAGES =+ "${PN}-templates ${PN}-setup ${PN}-networking"
+FILES_${PN}-templates += "${datadir}/lxc/templates"
+RDEPENDS_${PN}-templates += "bash"
+
+ALLOW_EMPTY_${PN}-networking = "1"
+
+FILES_${PN}-setup += "/etc/tmpfiles.d"
+FILES_${PN}-setup += "/lib/systemd/system"
+FILES_${PN}-setup += "/usr/lib/systemd/system"
+FILES_${PN}-setup += "/etc/init.d"
+
+PRIVATE_LIBS_${PN}-ptest = "liblxc.so.1"
+
+CACHED_CONFIGUREVARS += " \
+    ac_cv_path_PYTHON='${STAGING_BINDIR_NATIVE}/python3-native/python3' \
+    am_cv_python_pyexecdir='${exec_prefix}/${libdir}/python3.5/site-packages' \
+    am_cv_python_pythondir='${prefix}/${libdir}/python3.5/site-packages' \
+"
+
+do_install_append() {
+	# The /var/cache/lxc directory created by the Makefile
+	# is wiped out in volatile, we need to create this at boot.
+	rm -rf ${D}${localstatedir}/cache
+	install -d ${D}${sysconfdir}/default/volatiles
+	echo "d root root 0755 ${localstatedir}/cache/lxc none" \
+	     > ${D}${sysconfdir}/default/volatiles/99_lxc
+
+	for i in `grep -l "#! */bin/bash" ${D}${datadir}/lxc/hooks/*`; do \
+	    sed -e 's|#! */bin/bash|#!/bin/sh|' -i $i; done
+
+	if ${@base_contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then
+	    install -d ${D}${sysconfdir}/init.d
+	    install -m 755 config/init/sysvinit/lxc* ${D}${sysconfdir}/init.d
+	fi
+
+	# since python3-native is used for install location this will not be
+	# suitable for the target and we will have to correct the package install
+	if ${@bb.utils.contains('PACKAGECONFIG', 'python', 'true', 'false', d)}; then
+	    if [ -d ${D}${exec_prefix}/lib/python* ]; then mv ${D}${exec_prefix}/lib/python* ${D}${libdir}/; fi
+	    rmdir --ignore-fail-on-non-empty ${D}${exec_prefix}/lib
+	fi
+}
+
+EXTRA_OEMAKE += "TEST_DIR=${D}${PTEST_PATH}/src/tests"
+
+do_install_ptest() {
+	oe_runmake -C src/tests install-ptest
+}
+
+pkg_postinst_${PN}() {
+	if [ -z "$D" ] && [ -e /etc/init.d/populate-volatile.sh ] ; then
+		/etc/init.d/populate-volatile.sh update
+	fi
+}
+
+pkg_postinst_${PN}-networking() {
+	if [ "x$D" != "x" ]; then
+		exit 1
+	fi
+
+	# setup for our bridge
+        echo "lxc.network.link=lxcbr0" >> ${sysconfdir}/lxc/default.conf
+
+cat >> /etc/network/interfaces << EOF
+
+auto lxcbr0
+iface lxcbr0 inet dhcp
+	bridge_ports eth0
+	bridge_fd 0
+	bridge_maxwait 0
+EOF
+
+cat<<EOF>/etc/network/if-pre-up.d/lxcbr0
+#! /bin/sh
+
+if test "x\$IFACE" = xlxcbr0 ; then
+        brctl show |grep lxcbr0 > /dev/null 2>/dev/null
+        if [ \$? != 0 ] ; then
+                brctl addbr lxcbr0
+                brctl addif lxcbr0 eth0
+                ip addr flush eth0
+                ifconfig eth0 up
+        fi
+fi
+EOF
+chmod 755 /etc/network/if-pre-up.d/lxcbr0
+}