Squashed 'import-layers/yocto-poky/' changes from dc8508f6099..67491b0c104

Yocto 2.2.2 (Morty)

Change-Id: Id9a452e28940d9f166957de243d9cb1d8818704e
git-subtree-dir: import-layers/yocto-poky
git-subtree-split: 67491b0c104101bb9f366d697edd23c895be4302
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/import-layers/yocto-poky/bitbake/bin/bitbake-worker b/import-layers/yocto-poky/bitbake/bin/bitbake-worker
index 500f2ad..db3c4b1 100755
--- a/import-layers/yocto-poky/bitbake/bin/bitbake-worker
+++ b/import-layers/yocto-poky/bitbake/bin/bitbake-worker
@@ -11,7 +11,10 @@
 import errno
 import signal
 import pickle
+import traceback
+import queue
 from multiprocessing import Lock
+from threading import Thread
 
 if sys.getfilesystemencoding() != "utf-8":
     sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
@@ -63,7 +66,7 @@
     consolelog.setFormatter(conlogformat)
     logger.addHandler(consolelog)
 
-worker_queue = b""
+worker_queue = queue.Queue()
 
 def worker_fire(event, d):
     data = b"<event>" + pickle.dumps(event) + b"</event>"
@@ -72,21 +75,38 @@
 def worker_fire_prepickled(event):
     global worker_queue
 
-    worker_queue = worker_queue + event
-    worker_flush()
+    worker_queue.put(event)
 
-def worker_flush():
-    global worker_queue, worker_pipe
+#
+# We can end up in write contention with the cooker: it may be trying to send commands
+# to us while we are trying to send event data back. Therefore use a separate thread
+# for writing data back to the cooker.
+#
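+# Set by the main thread at shutdown; worker_flush() drains the queue before returning.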
+worker_thread_exit = False
 
-    if not worker_queue:
-        return
+def worker_flush(worker_queue):
+    worker_queue_int = b""
+    global worker_pipe, worker_thread_exit
 
-    try:
-        written = os.write(worker_pipe, worker_queue)
-        worker_queue = worker_queue[written:]
-    except (IOError, OSError) as e:
-        if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
-            raise
+    while True:
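+        # Block for up to a second waiting for data so the exit condition below is rechecked regularly.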
+        try:
+            worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
+        except queue.Empty:
+            pass
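+        # Drain the queue and retry partial writes until the local buffer is empty.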
+        while (worker_queue_int or not worker_queue.empty()):
+            try:
+                if not worker_queue.empty():
+                    worker_queue_int = worker_queue_int + worker_queue.get()
+                written = os.write(worker_pipe, worker_queue_int)
+                worker_queue_int = worker_queue_int[written:]
+            except (IOError, OSError) as e:
+                if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
+                    raise
+        if worker_thread_exit and worker_queue.empty() and not worker_queue_int:
+            return
+
+worker_thread = Thread(target=worker_flush, args=(worker_queue,))
+worker_thread.start()
 
 def worker_child_fire(event, d):
     global worker_pipe
@@ -234,9 +254,9 @@
                 if quieterrors:
                     the_data.setVarFlag(taskname, "quieterrors", "1")
 
-            except Exception as exc:
+            except Exception:
                 if not quieterrors:
-                    logger.critical(str(exc))
+                    logger.critical(traceback.format_exc())
                 os._exit(1)
             try:
                 if cfg.dry_run:
@@ -352,7 +372,6 @@
                 self.build_pipes[pipe].read()
             if len(self.build_pids):
                 self.process_waitpid()
-            worker_flush()
 
 
     def handle_item(self, item, func):
@@ -457,8 +476,10 @@
         import traceback
         sys.stderr.write(traceback.format_exc())
         sys.stderr.write(str(e))
-while len(worker_queue):
-    worker_flush()
+
+worker_thread_exit = True
+worker_thread.join()
+
 workerlog_write("exitting")
 sys.exit(0)
 
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/build.py b/import-layers/yocto-poky/bitbake/lib/bb/build.py
index c4c8aeb..b59a49b 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/build.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/build.py
@@ -92,6 +92,7 @@
     def __init__(self, t, logfile, d):
         self._task = t
         self._package = d.getVar("PF", True)
+        self._mc = d.getVar("BB_CURRENT_MC", True)
         self.taskfile = d.getVar("FILE", True)
         self.taskname = self._task
         self.logfile = logfile
@@ -723,7 +724,7 @@
     for mask in cleanmask:
         for name in glob.glob(mask):
             # Preserve sigdata files in the stamps directory
-            if "sigdata" in name:
+            if "sigdata" in name or "sigbasedata" in name:
                 continue
             # Preserve taint files in the stamps directory
             if name.endswith('.taint'):
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py b/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py
index 25938d6..5d2d440 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/codeparser.py
@@ -342,8 +342,7 @@
         except pyshlex.NeedMore:
             raise sherrors.ShellSyntaxError("Unexpected EOF")
 
-        for token in tokens:
-            self.process_tokens(token)
+        self.process_tokens(tokens)
 
     def process_tokens(self, tokens):
         """Process a supplied portion of the syntax tree as returned by
@@ -389,18 +388,24 @@
             "case_clause": case_clause,
         }
 
-        for token in tokens:
-            name, value = token
-            try:
-                more_tokens, words = token_handlers[name](value)
-            except KeyError:
-                raise NotImplementedError("Unsupported token type " + name)
+        def process_token_list(tokens):
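+            # pyshyacc can return nested lists of tokens; recurse so every (name, value) pair is processed.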
+            for token in tokens:
+                if isinstance(token, list):
+                    process_token_list(token)
+                    continue
+                name, value = token
+                try:
+                    more_tokens, words = token_handlers[name](value)
+                except KeyError:
+                    raise NotImplementedError("Unsupported token type " + name)
 
-            if more_tokens:
-                self.process_tokens(more_tokens)
+                if more_tokens:
+                    self.process_tokens(more_tokens)
 
-            if words:
-                self.process_words(words)
+                if words:
+                    self.process_words(words)
+
+        process_token_list(tokens)
 
     def process_words(self, words):
         """Process a set of 'words' in pyshyacc parlance, which includes
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cooker.py b/import-layers/yocto-poky/bitbake/lib/bb/cooker.py
index 42831e2..07897be 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/cooker.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/cooker.py
@@ -252,6 +252,10 @@
         signal.signal(signal.SIGHUP, self.sigterm_exception)
 
     def config_notifications(self, event):
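+        # IN_Q_OVERFLOW means inotify dropped notifications, so the pathname checks below cannot be trusted.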
+        if event.maskname == "IN_Q_OVERFLOW":
+            bb.warn("inotify event queue overflowed, invalidating caches.")
+            self.baseconfig_valid = False
+            return
         if not event.pathname in self.configwatcher.bbwatchedfiles:
             return
         if not event.pathname in self.inotify_modified_files:
@@ -259,6 +263,10 @@
         self.baseconfig_valid = False
 
     def notifications(self, event):
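+        # As above, an overflow means notifications were lost, so the parse cache can no longer be trusted.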
+        if event.maskname == "IN_Q_OVERFLOW":
+            bb.warn("inotify event queue overflowed, invalidating caches.")
+            self.parsecache_valid = False
+            return
         if not event.pathname in self.inotify_modified_files:
             self.inotify_modified_files.append(event.pathname)
         self.parsecache_valid = False
@@ -657,8 +665,40 @@
         # A task of None means use the default task
         if task is None:
             task = self.configuration.cmd
+        if not task.startswith("do_"):
+            task = "do_%s" % task
 
-        fulltargetlist = self.checkPackages(pkgs_to_build, task)
+        targetlist = self.checkPackages(pkgs_to_build, task)
+        fulltargetlist = []
+        defaulttask_implicit = ''
+        defaulttask_explicit = False
+        wildcard = False
+
+        # Wildcard expansion:
+        # Replace a target such as "multiconfig:*:bash"
+        # with "multiconfig:A:bash multiconfig:B:bash bash"
+        for k in targetlist:
+            if k.startswith("multiconfig:"):
+                if wildcard:
+                    bb.fatal('multiconfig conflict')
+                if k.split(":")[1] == "*":
+                    wildcard = True
+                    for mc in self.multiconfigs:
+                        if mc:
+                            fulltargetlist.append(k.replace('*', mc))
+                        # implicit default task
+                        else:
+                            defaulttask_implicit = k.split(":")[2]
+                else:
+                    fulltargetlist.append(k)
+            else:
+                defaulttask_explicit = True
+                fulltargetlist.append(k)
+
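+        # An empty multiconfig name in the wildcard expansion denotes the default configuration;
+        # only add its target when no explicit plain target already covers it.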
+        if not defaulttask_explicit and defaulttask_implicit != '':
+            fulltargetlist.append(defaulttask_implicit)
+
+        bb.debug(1,"Target list: %s" % (str(fulltargetlist)))
         taskdata = {}
         localdata = {}
 
@@ -715,6 +755,9 @@
         Create a dependency graph of pkgs_to_build including reverse dependency
         information.
         """
+        if not task.startswith("do_"):
+            task = "do_%s" % task
+
         runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
         rq.rqdata.prepare()
@@ -818,6 +861,9 @@
         """
         Create a dependency tree of pkgs_to_build, returning the data.
         """
+        if not task.startswith("do_"):
+            task = "do_%s" % task
+
         _, taskdata = self.prepareTreeData(pkgs_to_build, task)
 
         seen_fns = []
@@ -1318,6 +1364,8 @@
         # If we are told to do the None task then query the default task
         if (task == None):
             task = self.configuration.cmd
+        if not task.startswith("do_"):
+            task = "do_%s" % task
 
         fn, cls, mc = bb.cache.virtualfn2realfn(buildfile)
         fn = self.matchFile(fn)
@@ -1354,8 +1402,6 @@
         # Invalidate task for target if force mode active
         if self.configuration.force:
             logger.verbose("Invalidate task %s, %s", task, fn)
-            if not task.startswith("do_"):
-                task = "do_%s" % task
             bb.parse.siggen.invalidate_task(task, self.recipecaches[mc], fn)
 
         # Setup taskdata structure
@@ -1367,8 +1413,6 @@
         bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.expanded_data)
 
         # Execute the runqueue
-        if not task.startswith("do_"):
-            task = "do_%s" % task
         runlist = [[mc, item, task, fn]]
 
         rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
@@ -1579,7 +1623,8 @@
         if self.state != state.parsing and not self.parsecache_valid:
             self.parseConfiguration ()
             if CookerFeatures.SEND_SANITYEVENTS in self.featureset:
-                bb.event.fire(bb.event.SanityCheck(False), self.data)
+                for mc in self.multiconfigs:
+                    bb.event.fire(bb.event.SanityCheck(False), self.databuilder.mcdata[mc])
 
             for mc in self.multiconfigs:
                 ignore = self.databuilder.mcdata[mc].getVar("ASSUME_PROVIDED", True) or ""
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py b/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py
index b07c266..98f56ac 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/cookerdata.py
@@ -288,7 +288,7 @@
 
             multiconfig = (self.data.getVar("BBMULTICONFIG", True) or "").split()
             for config in multiconfig:
-                mcdata = self.parseConfigurationFiles(['conf/multiconfig/%s.conf' % config] + self.prefiles, self.postfiles)
+                mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
                 bb.event.fire(bb.event.ConfigParsed(), mcdata)
                 self.mcdata[config] = mcdata
 
@@ -304,8 +304,9 @@
     def _findLayerConf(self, data):
         return findConfigFile("bblayers.conf", data)
 
-    def parseConfigurationFiles(self, prefiles, postfiles):
+    def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"):
         data = bb.data.createCopy(self.basedata)
+        data.setVar("BB_CURRENT_MC", mc)
 
         # Parse files for loading *before* bitbake.conf and any includes
         for f in prefiles:
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/data.py b/import-layers/yocto-poky/bitbake/lib/bb/data.py
index c1f27cd..c56965c 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/data.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/data.py
@@ -258,11 +258,13 @@
                                       not d.getVarFlag(key, 'unexport', False))
 
 def exported_vars(d):
-    for key in exported_keys(d):
+    k = list(exported_keys(d))
+    for key in k:
         try:
             value = d.getVar(key, True)
-        except Exception:
-            pass
+        except Exception as err:
+            bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE", True), key, err))
+            continue
 
         if value is not None:
             yield key, str(value)
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py b/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py
index f100446..805a9a7 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/data_smart.py
@@ -748,13 +748,14 @@
                 if match:
                     removes.extend(self.expand(r).split())
 
-            filtered = filter(lambda v: v not in removes,
-                              value.split())
-            value = " ".join(filtered)
-            if expand and var in self.expand_cache:
-                 # We need to ensure the expand cache has the correct value
-                 # flag == "_content" here
-                self.expand_cache[var].value = value
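+            # Skip the split/rejoin and the expand-cache update when there is nothing to remove.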
+            if removes:
+                filtered = filter(lambda v: v not in removes,
+                                  value.split())
+                value = " ".join(filtered)
+                if expand and var in self.expand_cache:
+                    # We need to ensure the expand cache has the correct value
+                    # flag == "_content" here
+                    self.expand_cache[var].value = value
         return value
 
     def delVarFlag(self, var, flag, **loginfo):
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py
index ecb946a..23d48ac 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/fetch2/wget.py
@@ -108,9 +108,8 @@
             bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))
             fetchcmd += " -O " + dldir + os.sep + ud.localfile
 
-        if ud.user:
-            up = ud.user.split(":")
-            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (up[0],up[1])
+        if ud.user and ud.pswd:
+            fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
 
         uri = ud.url.split(";")[0]
         if os.path.exists(ud.localpath):
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py b/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py
index 84b2685..9384c72 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/runqueue.py
@@ -289,8 +289,8 @@
         return tid + task_name_suffix
 
     def get_short_user_idstring(self, task, task_name_suffix = ""):
-        (mc, fn, taskname, _) = split_tid_mcfn(task)
-        pn = self.dataCaches[mc].pkg_fn[fn]
+        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
+        pn = self.dataCaches[mc].pkg_fn[taskfn]
         taskname = taskname_from_tid(task) + task_name_suffix
         return "%s:%s" % (pn, taskname)
 
@@ -884,14 +884,14 @@
         if not self.cooker.configuration.nosetscene:
             for tid in self.runtaskentries:
                 (mc, fn, taskname, _) = split_tid_mcfn(tid)
-                setscenetid = fn + ":" + taskname + "_setscene"
+                setscenetid = tid + "_setscene"
                 if setscenetid not in taskData[mc].taskentries:
                     continue
                 self.runq_setscene_tids.append(tid)
 
         def invalidate_task(tid, error_nostamp):
-            (mc, fn, taskname, _) = split_tid_mcfn(tid)
-            taskdep = self.dataCaches[mc].task_deps[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+            taskdep = self.dataCaches[mc].task_deps[taskfn]
             if fn + ":" + taskname not in taskData[mc].taskentries:
                 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
             if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
@@ -997,8 +997,9 @@
             magic = "decafbadbad"
         if fakeroot:
             magic = magic + "beef"
-            fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
-            fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
+            mcdata = self.cooker.databuilder.mcdata[mc]
+            fakerootcmd = mcdata.getVar("FAKEROOTCMD", True)
+            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV", True) or "").split()
             env = os.environ.copy()
             for key, value in (var.split('=') for var in fakerootenv):
                 env[key] = value
@@ -1059,10 +1060,9 @@
         for mc in self.rqdata.dataCaches:
             self.worker[mc] = self._start_worker(mc)
 
-    def start_fakeworker(self, rqexec):
-        if not self.fakeworker:
-            for mc in self.rqdata.dataCaches:
-                self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
+    def start_fakeworker(self, rqexec, mc):
+        if not mc in self.fakeworker:
+            self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
 
     def teardown_workers(self):
         self.teardown = True
@@ -1322,7 +1322,7 @@
                 continue
 
             sq_fn.append(fn)
-            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
+            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
             sq_hash.append(self.rqdata.runtaskentries[tid].hash)
             sq_taskname.append(taskname)
             sq_task.append(tid)
@@ -1402,8 +1402,8 @@
 
 
         for tid in invalidtasks:
-            (mc, fn, taskname, _) = split_tid_mcfn(tid)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
             h = self.rqdata.runtaskentries[tid].hash
             matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
             match = None
@@ -1506,8 +1506,8 @@
         taskdata = {}
         taskdeps.add(task)
         for dep in taskdeps:
-            (mc, fn, taskname, _) = split_tid_mcfn(dep)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
+            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
             taskdata[dep] = [pn, taskname, fn]
         call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
         locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
@@ -1707,7 +1707,7 @@
 
             # Check tasks that are going to run against the whitelist
             def check_norun_task(tid, showerror=False):
-                (mc, fn, taskname, _) = split_tid_mcfn(tid)
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                 # Ignore covered tasks
                 if tid in self.rq.scenequeue_covered:
                     return False
@@ -1715,11 +1715,11 @@
                 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                     return False
                 # Ignore noexec tasks
-                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
+                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                 if 'noexec' in taskdep and taskname in taskdep['noexec']:
                     return False
 
-                pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                     if showerror:
                         if tid in self.rqdata.runq_setscene_tids:
@@ -1787,9 +1787,9 @@
 
             taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
             if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
-                if not self.rq.fakeworker:
+                if not mc in self.rq.fakeworker:
                     try:
-                        self.rq.start_fakeworker(self)
+                        self.rq.start_fakeworker(self, mc)
                     except OSError as exc:
                         logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                         self.rq.state = runQueueFailed
@@ -1868,6 +1868,7 @@
         sq_revdeps_new = {}
         sq_revdeps_squash = {}
         self.sq_harddeps = {}
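+        # Map each setscene tid to its stamp file so the scheduler can avoid running tasks that share a stamp.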
+        self.stamps = {}
 
         # We need to construct a dependency graph for the setscene functions. Intermediate
         # dependencies between the setscene tasks only complicate the code. This code
@@ -1978,9 +1979,10 @@
         # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
         # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
         for tid in self.rqdata.runq_setscene_tids:
-                (mc, fn, taskname, _) = split_tid_mcfn(tid)
-                realtid = fn + ":" + taskname + "_setscene"
+                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
+                realtid = tid + "_setscene"
                 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
+                self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
                 for (depname, idependtask) in idepends:
 
                     if depname not in self.rqdata.taskData[mc].build_targets:
@@ -2044,7 +2046,7 @@
             for tid in self.sq_revdeps:
                 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
 
-                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
+                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
 
                 if 'noexec' in taskdep and taskname in taskdep['noexec']:
                     noexec.append(tid)
@@ -2065,7 +2067,7 @@
                     continue
 
                 sq_fn.append(fn)
-                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
+                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                 sq_taskname.append(taskname)
                 sq_task.append(tid)
@@ -2113,8 +2115,8 @@
     def check_taskfail(self, task):
         if self.rqdata.setscenewhitelist:
             realtask = task.split('_setscene')[0]
-            (mc, fn, taskname, _) = split_tid_mcfn(realtask)
-            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
+            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
+            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
             if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                 self.rq.state = runQueueCleanUp
@@ -2157,7 +2159,7 @@
         if self.stats.active < self.number_tasks:
             # Find the next setscene to run
             for nexttask in self.rqdata.runq_setscene_tids:
-                if nexttask in self.runq_buildable and nexttask not in self.runq_running:
+                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                     if nexttask in self.unskippable:
                         logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                     if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
@@ -2199,14 +2201,16 @@
 
             taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
             if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
-                if not self.rq.fakeworker:
-                    self.rq.start_fakeworker(self)
+                if not mc in self.rq.fakeworker:
+                    self.rq.start_fakeworker(self, mc)
                 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
                 self.rq.fakeworker[mc].process.stdin.flush()
             else:
                 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
                 self.rq.worker[mc].process.stdin.flush()
 
+            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
+            self.build_stamps2.append(self.build_stamps[task])
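+            # Track in-flight stamp files; the setscene selection loop skips tasks whose stamp is already being built.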
             self.runq_running.add(task)
             self.stats.taskActive()
             if self.stats.active < self.number_tasks:
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/siggen.py b/import-layers/yocto-poky/bitbake/lib/bb/siggen.py
index 3a7dac4..542bbb9 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/siggen.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/siggen.py
@@ -30,6 +30,7 @@
     name = "noop"
 
     def __init__(self, data):
+        self.basehash = {}
         self.taskhash = {}
         self.runtaskdeps = {}
         self.file_checksum_values = {}
@@ -61,11 +62,10 @@
         return
 
     def get_taskdata(self):
-       return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints)
+        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)
 
     def set_taskdata(self, data):
-        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data
-
+        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
 
 class SignatureGeneratorBasic(SignatureGenerator):
     """
@@ -133,7 +133,11 @@
                 var = lookupcache[dep]
                 if var is not None:
                     data = data + str(var)
-            self.basehash[fn + "." + task] = hashlib.md5(data.encode("utf-8")).hexdigest()
+            datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
+            k = fn + "." + task
+            if k in self.basehash and self.basehash[k] != datahash:
+                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
+            self.basehash[k] = datahash
             taskdeps[task] = alldeps
 
         self.taskdeps[fn] = taskdeps
@@ -182,6 +186,7 @@
     def get_taskhash(self, fn, task, deps, dataCache):
         k = fn + "." + task
         data = dataCache.basetaskhash[k]
+        self.basehash[k] = data
         self.runtaskdeps[k] = []
         self.file_checksum_values[k] = []
         recipename = dataCache.pkg_fn[fn]
@@ -278,6 +283,15 @@
             if 'nostamp:' in self.taints[k]:
                 data['taint'] = self.taints[k]
 
+        computed_basehash = calc_basehash(data)
+        if computed_basehash != self.basehash[k]:
+            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
+        if runtime and k in self.taskhash:
+            computed_taskhash = calc_taskhash(data)
+            if computed_taskhash != self.taskhash[k]:
+                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
+                sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)
+
         fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
         try:
             with os.fdopen(fd, "wb") as stream:
@@ -292,15 +306,6 @@
                 pass
             raise err
 
-        computed_basehash = calc_basehash(data)
-        if computed_basehash != self.basehash[k]:
-            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
-        if runtime and k in self.taskhash:
-            computed_taskhash = calc_taskhash(data)
-            if computed_taskhash != self.taskhash[k]:
-                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
-
-
     def dump_sigs(self, dataCaches, options):
         for fn in self.taskdeps:
             for task in self.taskdeps[fn]:
@@ -346,9 +351,14 @@
     bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
 
 def clean_basepath(a):
+    mc = None
+    if a.startswith("multiconfig:"):
+        _, mc, a = a.split(":", 2)
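+        # Remember the multiconfig name; it is re-appended as a suffix below.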
     b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
     if a.startswith("virtual:"):
         b = b + ":" + a.rsplit(":", 1)[0]
+    if mc:
+        b = b + ":multiconfig:" + mc
     return b
 
 def clean_basepaths(a):
@@ -554,7 +564,8 @@
         data = data + sigdata['runtaskhashes'][dep]
 
     for c in sigdata['file_checksum_values']:
-        data = data + c[1]
+        if c[1]:
+            data = data + c[1]
 
     if 'taint' in sigdata:
         if 'nostamp:' in sigdata['taint']:
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py b/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py
index 8899e86..9fa5b5b 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/tinfoil.py
@@ -51,10 +51,13 @@
         features = []
         if tracking:
             features.append(CookerFeatures.BASEDATASTORE_TRACKING)
+        cleanedvars = bb.utils.clean_environment()
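+        # clean_environment() returns the variables it removed; restore them once the cooker is created.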
         self.cooker = BBCooker(self.config, features)
         self.config_data = self.cooker.data
         bb.providers.logger.setLevel(logging.ERROR)
         self.cooker_data = None
+        for k in cleanedvars:
+            os.environ[k] = cleanedvars[k]
 
     def register_idle_function(self, function, data):
         pass
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py
index 5b69660..3ddcb2a 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/buildinfohelper.py
@@ -982,6 +982,31 @@
             pass
         return task_information
 
+    def _get_layer_version_for_dependency(self, pathRE):
+        """ Returns the layer in the toaster db that has a full regex match to the pathRE.
+        pathRE - the layer path passed as a regex in the event. It is created in
+          cooker.py as a collection for the layer priorities.
+        """
+        self._ensure_build()
+
+        def _sort_longest_path(layer_version):
+            assert isinstance(layer_version, Layer_Version)
+            return len(layer_version.local_path)
+
+        # we don't care about matching the trailing path component
+        p = re.compile(re.sub("/[^/]*?$","",pathRE))
+        # Heuristic: we always match a recipe to the deepest layer path among the discovered layers
+        for lvo in sorted(self.orm_wrapper.layer_version_objects, reverse=True, key=_sort_longest_path):
+            if p.fullmatch(lvo.local_path):
+                return lvo
+            if lvo.layer.local_source_dir:
+                if p.fullmatch(lvo.layer.local_source_dir):
+                    return lvo
+        # If we get here we didn't read the layers correctly; dump whatever information we have to the log
+        logger.warning("Could not match layer dependency for path %s : %s", pathRE, self.orm_wrapper.layer_version_objects)
+
     def _get_layer_version_for_path(self, path):
         self._ensure_build()
 
@@ -1372,7 +1397,7 @@
         if 'layer-priorities' in event._depgraph.keys():
             for lv in event._depgraph['layer-priorities']:
                 (_, path, _, priority) = lv
-                layer_version_obj = self._get_layer_version_for_path(path[1:]) # paths start with a ^
+                layer_version_obj = self._get_layer_version_for_dependency(path)
                 assert layer_version_obj is not None
                 layer_version_obj.priority = priority
                 layer_version_obj.save()
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py b/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py
index fda7cc2..113fced 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/ui/uihelper.py
@@ -32,7 +32,10 @@
 
     def eventHandler(self, event):
         if isinstance(event, bb.build.TaskStarted):
-            self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() }
+            if event._mc != "default":
+                self.running_tasks[event.pid] = { 'title' : "mc:%s:%s %s" % (event._mc, event._package, event._task), 'starttime' : time.time() }
+            else:
+                self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time() }
             self.running_pids.append(event.pid)
             self.needUpdate = True
         elif isinstance(event, bb.build.TaskSucceeded):
diff --git a/import-layers/yocto-poky/bitbake/lib/bb/utils.py b/import-layers/yocto-poky/bitbake/lib/bb/utils.py
index 729848a..16fc9db 100644
--- a/import-layers/yocto-poky/bitbake/lib/bb/utils.py
+++ b/import-layers/yocto-poky/bitbake/lib/bb/utils.py
@@ -378,7 +378,7 @@
 
         # If the exception is from spwaning a task, let's be helpful and display
         # the output (which hopefully includes stderr).
-        if isinstance(value, subprocess.CalledProcessError):
+        if isinstance(value, subprocess.CalledProcessError) and value.output:
             error.append("Subprocess output:")
             error.append(value.output.decode("utf-8", errors="ignore"))
     finally:
diff --git a/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py b/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py
index 3dfa2b2..aec9dbb 100644
--- a/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py
+++ b/import-layers/yocto-poky/bitbake/lib/toaster/toastermain/settings.py
@@ -60,9 +60,19 @@
 if 'sqlite' in DATABASES['default']['ENGINE']:
     DATABASES['default']['OPTIONS'] = { 'timeout': 20 }
 
-# Hosts/domain names that are valid for this site; required if DEBUG is False
-# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
-ALLOWED_HOSTS = []
+# As of the Django 1.8.16 release, '*' is needed to allow connections while running
+# on hosts that do not explicitly set the fqdn for the toaster server.
+# See https://docs.djangoproject.com/en/dev/ref/settings/ for info on ALLOWED_HOSTS.
+# Previously this setting was not enforced when DEBUG was set, but it is now.
+# ALLOWED_HOSTS used to default to ['localhost','127.0.0.1','::1'], so binding to
+# 0.0.0.0:<port> made toaster reachable via either localhost or the fqdn. To keep
+# that behaviour with an fqdn explicitly enabled, set
+# ALLOWED_HOSTS = ['localhost','127.0.0.1','::1','myserver.mycompany.com'] for
+# Django >= 1.8.16. By default, we do not enforce this restriction in DEBUG mode.
+if DEBUG is True:
+    # this will allow connections via localhost, hostname, or fqdn
+    ALLOWED_HOSTS = ['*']
 
 # Local time zone for this installation. Choices can be found here:
 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name