blob: 6a2de240cc87a23a9a197942f0709c36a942b66d [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001"""
2BitBake 'RunQueue' implementation
3
4Handles preparation and execution of a queue of tasks
5"""
6
7# Copyright (C) 2006-2007 Richard Purdie
8#
Brad Bishopc342db32019-05-15 21:57:59 -04009# SPDX-License-Identifier: GPL-2.0-only
Patrick Williamsc124f4f2015-09-15 14:41:29 -050010#
Patrick Williamsc124f4f2015-09-15 14:41:29 -050011
12import copy
13import os
14import sys
15import signal
16import stat
17import fcntl
18import errno
19import logging
20import re
21import bb
22from bb import msg, data, event
23from bb import monitordisk
24import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060025import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050026from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040027import shlex
Brad Bishop96ff1982019-08-19 13:50:42 -040028import pprint
Patrick Williamsc124f4f2015-09-15 14:41:29 -050029
# Module loggers: "BitBake" is the root bitbake logger, the
# "BitBake.RunQueue" child is used for all runqueue messages.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 64-hex-character (sha256-sized) string, case
# insensitive; the look-around assertions reject matches that are embedded
# inside a longer alphanumeric run.
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050034
def fn_from_tid(tid):
    """Return the recipe filename portion of a task id (text before the last ':')."""
    parts = tid.rsplit(":", 1)
    return parts[0]
37
def taskname_from_tid(tid):
    """Return the task name portion of a task id (text after the last ':')."""
    parts = tid.rsplit(":", 1)
    return parts[1]
40
def mc_from_tid(tid):
    """Return the multiconfig name embedded in *tid*, or "" for a plain tid."""
    if not tid.startswith('mc:'):
        return ""
    return tid.split(':')[1]
45
def split_tid(tid):
    """Return (mc, fn, taskname) for *tid*, dropping the mcfn element."""
    return split_tid_mcfn(tid)[:3]
49
def split_tid_mcfn(tid):
    """
    Split a task id into its components.

    Returns (mc, fn, taskname, mcfn) where mc is "" and mcfn == fn for a
    non-multiconfig tid ("fn:taskname"); multiconfig tids have the form
    "mc:<mc>:fn:taskname".
    """
    if tid.startswith('mc:'):
        elems = tid.split(':')
        mc = elems[1]
        # The filename itself may contain ':' characters, so rejoin
        # everything between the mc name and the trailing task name.
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        return (mc, fn, taskname, "mc:" + mc + ":" + fn)

    parts = tid.rsplit(":", 1)
    return ("", parts[0], parts[1], parts[0])
65
def build_tid(mc, fn, taskname):
    """Construct a task id from its components (inverse of split_tid_mcfn)."""
    tid = fn + ":" + taskname
    if mc:
        return "mc:" + mc + ":" + tid
    return tid
70
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """
    Build the key used to pair up potentially matching multiconfig tasks.

    Two tasks are considered a match when their PN, task name and task
    hash are all equal.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].hash
    # Fix: use the task name variable in the key. The previous code
    # concatenated the literal string "taskname", so the key only really
    # matched on PN and hash, contrary to the stated intent above.
    return pn + ":" + taskname + ":" + h
78
Patrick Williamsc124f4f2015-09-15 14:41:29 -050079class RunQueueStats:
80 """
81 Holds statistics on the tasks handled by the associated runQueue
82 """
83 def __init__(self, total):
84 self.completed = 0
85 self.skipped = 0
86 self.failed = 0
87 self.active = 0
88 self.total = total
89
90 def copy(self):
91 obj = self.__class__(self.total)
92 obj.__dict__.update(self.__dict__)
93 return obj
94
95 def taskFailed(self):
96 self.active = self.active - 1
97 self.failed = self.failed + 1
98
Brad Bishop1a4b7ee2018-12-16 17:11:34 -080099 def taskCompleted(self):
100 self.active = self.active - 1
101 self.completed = self.completed + 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500102
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800103 def taskSkipped(self):
104 self.active = self.active + 1
105 self.skipped = self.skipped + 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500106
107 def taskActive(self):
108 self.active = self.active + 1
109
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
# NOTE(review): values 4 and 5 are not defined here; presumably they belonged
# to states that were removed. Kept unrenumbered so the remaining values keep
# their historical meaning.
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: materialise the keys into a real list. The previous code,
        # [self.rqdata.runtaskentries.keys()], built a single-element list
        # containing a dict view, so prio_map.index(tid) below could never
        # find any task id (the bug was latent because the subclass
        # schedulers rebuild prio_map).
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        # Cache of taskname -> "number_threads" varflag value (falsy if unset)
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Lazily-built map of tid -> position in prio_map (lower = runs earlier)
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop tasks that have started running since the last call
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        # Only consider tasks present in tasks_covered or tasks_notcovered
        # (presumably: tasks whose setscene status has been decided -- see RunQueue)
        buildable = [x for x in self.buildable if (x in self.rq.tasks_covered or x in self.rq.tasks_notcovered)]
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        # Fast path: a single candidate needs no priority comparison
        if len(buildable) == 1:
            tid = buildable[0]
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the candidate with the best (lowest) priority whose stamp is
        # not already being written by another running task
        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Notify the scheduler that *task* has become buildable."""
        self.buildable.append(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of *taskid* for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Dump the priority map to the debug log, most important task first."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
220
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """Build the priority map ordered by descending task weight."""
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket the task ids by their computed weight.
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            buckets.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # Flatten the buckets in ascending weight order, then flip the
        # whole list so the heaviest-weighted tasks come first.
        self.prio_map = [tid for w in sorted(buckets) for tid in buckets[w]]
        self.prio_map.reverse()
247
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        """Reorder the speed scheduler's priority map to group tasks by recipe."""
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500344
class RunTaskEntry(object):
    """
    Mutable per-task record holding the dependency sets, hashes and
    scheduling weight computed for a single task id.
    """
    def __init__(self):
        # Forward and reverse dependency sets of task ids
        self.depends = set()
        self.revdeps = set()
        # Task hash and unified hash; filled in later
        self.hash = self.unihash = None
        self.task = None
        self.weight = 1
353
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500354class RunQueueData:
355 """
356 BitBake Run Queue implementation
357 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Capture the cooker/configuration state needed to build the runqueue.

        rq         -- owning RunQueue instance
        cooker     -- bitbake cooker instance
        cfgData    -- configuration datastore (read here, not retained)
        dataCaches -- per-multiconfig recipe metadata caches
        taskData   -- per-multiconfig TaskData
        targets    -- the targets requested for this build
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Configuration knobs; the whitelists default to empty when unset.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
374
    def reset(self):
        """Initialise/clear the tid -> RunTaskEntry mapping."""
        self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500377
378 def runq_depends_names(self, ids):
379 import re
380 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600381 for id in ids:
382 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500383 nam = re.sub("_[^,]*,", ",", nam)
384 ret.extend([nam])
385 return ret
386
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600387 def get_task_hash(self, tid):
388 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500389
Brad Bishop19323692019-04-05 15:28:33 -0400390 def get_task_unihash(self, tid):
391 return self.runtaskentries[tid].unihash
392
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600393 def get_user_idstring(self, tid, task_name_suffix = ""):
394 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500395
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500396 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500397 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
398 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600399 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500400 return "%s:%s" % (pn, taskname)
401
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing each dependency loop found
        (the search stops after 10 loops).
        """
        from copy import deepcopy

        valid_chains = []   # loops already recorded, in normalised form
        explored_deps = {}  # tid -> flattened reverse-dependency list already walked
        msgs = []

        class TooManyLoops(Exception):
            # Raised purely to abort the search once enough loops are found
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies, with the current
            # path recorded in prev_chain; meeting a tid that is already on
            # the path means we have found a loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    # Not walked yet
                    scan = True
                elif revdep in explored_deps[revdep]:
                    # revdep's explored deps include itself (a self-loop)
                    scan = True
                else:
                    # Re-scan if any task on the current path is reachable from revdep
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
494
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints -- the seed tasks for the reverse walk (presumably those
                     with no reverse dependencies -- confirm against caller).
        Returns the tid -> weight mapping; weights are also stored on the
        corresponding RunTaskEntry objects.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Endpoints seed the walk with a weight of 10
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Breadth-first walk from the endpoints towards their dependencies:
        # each task's weight is accumulated into every task it depends on,
        # and a dependency joins the next round once all of its reverse
        # dependencies have contributed (deps_left reaches 0).
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                # Never reached by the walk: part of (or behind) a dependency cycle
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
554
555 def prepare(self):
556 """
557 Turn a set of taskData into a RunQueue and compute data needed
558 to optimise the execution order.
559 """
560
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600561 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500562 recursivetasks = {}
563 recursiveitasks = {}
564 recursivetasksselfref = set()
565
566 taskData = self.taskData
567
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 found = False
569 for mc in self.taskData:
570 if len(taskData[mc].taskentries) > 0:
571 found = True
572 break
573 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500574 # Nothing to do
575 return 0
576
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600577 self.init_progress_reporter.start()
578 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500579
580 # Step A - Work out a list of tasks to run
581 #
582 # Taskdata gives us a list of possible providers for every build and run
583 # target ordered by priority. It also gives information on each of those
584 # providers.
585 #
586 # To create the actual list of tasks to execute we fix the list of
587 # providers and then resolve the dependencies into task IDs. This
588 # process is repeated for each type of dependency (tdepends, deptask,
589 # rdeptast, recrdeptask, idepends).
590
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600591 def add_build_dependencies(depids, tasknames, depends, mc):
592 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500593 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600594 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500595 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500597 if depdata is None:
598 continue
599 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600600 t = depdata + ":" + taskname
601 if t in taskData[mc].taskentries:
602 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500603
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600604 def add_runtime_dependencies(depids, tasknames, depends, mc):
605 for depname in depids:
606 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500607 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600608 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500609 if depdata is None:
610 continue
611 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600612 t = depdata + ":" + taskname
613 if t in taskData[mc].taskentries:
614 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500615
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800616 def add_mc_dependencies(mc, tid):
617 mcdeps = taskData[mc].get_mcdepends()
618 for dep in mcdeps:
619 mcdependency = dep.split(':')
620 pn = mcdependency[3]
621 frommc = mcdependency[1]
622 mcdep = mcdependency[2]
623 deptask = mcdependency[4]
624 if mc == frommc:
625 fn = taskData[mcdep].build_targets[pn][0]
626 newdep = '%s:%s' % (fn,deptask)
627 taskData[mc].taskentries[tid].tdepends.append(newdep)
628
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600629 for mc in taskData:
630 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500631
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600632 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
633 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500634
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600635 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
636
637 depends = set()
638 task_deps = self.dataCaches[mc].task_deps[taskfn]
639
640 self.runtaskentries[tid] = RunTaskEntry()
641
642 if fn in taskData[mc].failed_fns:
643 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500644
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800645 # We add multiconfig dependencies before processing internal task deps (tdepends)
646 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
647 add_mc_dependencies(mc, tid)
648
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649 # Resolve task internal dependencies
650 #
651 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600652 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800653 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
654 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500655
656 # Resolve 'deptask' dependencies
657 #
658 # e.g. do_sometask[deptask] = "do_someothertask"
659 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600660 if 'deptask' in task_deps and taskname in task_deps['deptask']:
661 tasknames = task_deps['deptask'][taskname].split()
662 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500663
664 # Resolve 'rdeptask' dependencies
665 #
666 # e.g. do_sometask[rdeptask] = "do_someothertask"
667 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600668 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
669 tasknames = task_deps['rdeptask'][taskname].split()
670 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500671
672 # Resolve inter-task dependencies
673 #
674 # e.g. do_sometask[depends] = "targetname:do_someothertask"
675 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600676 idepends = taskData[mc].taskentries[tid].idepends
677 for (depname, idependtask) in idepends:
678 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500679 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600680 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500681 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600682 t = depdata + ":" + idependtask
683 depends.add(t)
684 if t not in taskData[mc].taskentries:
685 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
686 irdepends = taskData[mc].taskentries[tid].irdepends
687 for (depname, idependtask) in irdepends:
688 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500689 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500690 if not taskData[mc].run_targets[depname]:
691 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600692 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500693 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600694 t = depdata + ":" + idependtask
695 depends.add(t)
696 if t not in taskData[mc].taskentries:
697 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698
699 # Resolve recursive 'recrdeptask' dependencies (Part A)
700 #
701 # e.g. do_sometask[recrdeptask] = "do_someothertask"
702 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
703 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600704 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
705 tasknames = task_deps['recrdeptask'][taskname].split()
706 recursivetasks[tid] = tasknames
707 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
708 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
709 if taskname in tasknames:
710 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500711
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600712 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
713 recursiveitasks[tid] = []
714 for t in task_deps['recideptask'][taskname].split():
715 newdep = build_tid(mc, fn, t)
716 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500717
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600718 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400719 # Remove all self references
720 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500721
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600722 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500723
Brad Bishop316dfdd2018-06-25 12:45:53 -0400724 self.init_progress_reporter.next_stage()
725
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500726 # Resolve recursive 'recrdeptask' dependencies (Part B)
727 #
728 # e.g. do_sometask[recrdeptask] = "do_someothertask"
729 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600730 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600731
Brad Bishop316dfdd2018-06-25 12:45:53 -0400732 # Generating/interating recursive lists of dependencies is painful and potentially slow
733 # Precompute recursive task dependencies here by:
734 # a) create a temp list of reverse dependencies (revdeps)
735 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
736 # c) combine the total list of dependencies in cumulativedeps
737 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500738
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500739
Brad Bishop316dfdd2018-06-25 12:45:53 -0400740 revdeps = {}
741 deps = {}
742 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600743 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400744 deps[tid] = set(self.runtaskentries[tid].depends)
745 revdeps[tid] = set()
746 cumulativedeps[tid] = set()
747 # Generate a temp list of reverse dependencies
748 for tid in self.runtaskentries:
749 for dep in self.runtaskentries[tid].depends:
750 revdeps[dep].add(tid)
751 # Find the dependency chain endpoints
752 endpoints = set()
753 for tid in self.runtaskentries:
754 if len(deps[tid]) == 0:
755 endpoints.add(tid)
756 # Iterate the chains collating dependencies
757 while endpoints:
758 next = set()
759 for tid in endpoints:
760 for dep in revdeps[tid]:
761 cumulativedeps[dep].add(fn_from_tid(tid))
762 cumulativedeps[dep].update(cumulativedeps[tid])
763 if tid in deps[dep]:
764 deps[dep].remove(tid)
765 if len(deps[dep]) == 0:
766 next.add(dep)
767 endpoints = next
768 #for tid in deps:
769 # if len(deps[tid]) != 0:
770 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
771
772 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
773 # resolve these recursively until we aren't adding any further extra dependencies
774 extradeps = True
775 while extradeps:
776 extradeps = 0
777 for tid in recursivetasks:
778 tasknames = recursivetasks[tid]
779
780 totaldeps = set(self.runtaskentries[tid].depends)
781 if tid in recursiveitasks:
782 totaldeps.update(recursiveitasks[tid])
783 for dep in recursiveitasks[tid]:
784 if dep not in self.runtaskentries:
785 continue
786 totaldeps.update(self.runtaskentries[dep].depends)
787
788 deps = set()
789 for dep in totaldeps:
790 if dep in cumulativedeps:
791 deps.update(cumulativedeps[dep])
792
793 for t in deps:
794 for taskname in tasknames:
795 newtid = t + ":" + taskname
796 if newtid == tid:
797 continue
798 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
799 extradeps += 1
800 self.runtaskentries[tid].depends.add(newtid)
801
802 # Handle recursive tasks which depend upon other recursive tasks
803 deps = set()
804 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
805 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
806 for newtid in deps:
807 for taskname in tasknames:
808 if not newtid.endswith(":" + taskname):
809 continue
810 if newtid in self.runtaskentries:
811 extradeps += 1
812 self.runtaskentries[tid].depends.add(newtid)
813
814 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
815
816 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
817 for tid in recursivetasksselfref:
818 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600819
820 self.init_progress_reporter.next_stage()
821
822 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500823
824 # Step B - Mark all active tasks
825 #
826 # Start with the tasks we were asked to run and mark all dependencies
827 # as active too. If the task is to be 'forced', clear its stamp. Once
828 # all active tasks are marked, prune the ones we don't need.
829
830 logger.verbose("Marking Active Tasks")
831
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600832 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500833 """
834 Mark an item as active along with its depends
835 (calls itself recursively)
836 """
837
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600838 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500839 return
840
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600841 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500842
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600843 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500844 for depend in depends:
845 mark_active(depend, depth+1)
846
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600847 self.target_tids = []
848 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600850 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500851 continue
852
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600853 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500854 continue
855
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500856 parents = False
857 if task.endswith('-'):
858 parents = True
859 task = task[:-1]
860
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600861 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500862 continue
863
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600864 # fn already has mc prefix
865 tid = fn + ":" + task
866 self.target_tids.append(tid)
867 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500868 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600869 tasks = []
870 for x in taskData[mc].taskentries:
871 if x.startswith(fn + ":"):
872 tasks.append(taskname_from_tid(x))
873 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500874 if close_matches:
875 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
876 else:
877 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600878 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
879
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500880 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500881 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600882 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500883 mark_active(i, 1)
884 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600885 mark_active(tid, 1)
886
887 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500888
889 # Step C - Prune all inactive tasks
890 #
891 # Once all active tasks are marked, prune the ones we don't need.
892
Brad Bishop316dfdd2018-06-25 12:45:53 -0400893 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600894 for tid in list(self.runtaskentries.keys()):
895 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400896 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600897 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600898
Brad Bishop316dfdd2018-06-25 12:45:53 -0400899 # Handle --runall
900 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500901 # re-run the mark_active and then drop unused tasks from new list
902 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400903
904 for task in self.cooker.configuration.runall:
905 runall_tids = set()
906 for tid in list(self.runtaskentries):
907 wanttid = fn_from_tid(tid) + ":do_%s" % task
908 if wanttid in delcount:
909 self.runtaskentries[wanttid] = delcount[wanttid]
910 if wanttid in self.runtaskentries:
911 runall_tids.add(wanttid)
912
913 for tid in list(runall_tids):
914 mark_active(tid,1)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500915
916 for tid in list(self.runtaskentries.keys()):
917 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400918 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500919 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500920
921 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400922 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
923
924 self.init_progress_reporter.next_stage()
925
926 # Handle runonly
927 if self.cooker.configuration.runonly:
928 # re-run the mark_active and then drop unused tasks from new list
929 runq_build = {}
930
931 for task in self.cooker.configuration.runonly:
932 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
933
934 for tid in list(runonly_tids):
935 mark_active(tid,1)
936
937 for tid in list(self.runtaskentries.keys()):
938 if tid not in runq_build:
939 delcount[tid] = self.runtaskentries[tid]
940 del self.runtaskentries[tid]
941
942 if len(self.runtaskentries) == 0:
943 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500944
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500945 #
946 # Step D - Sanity checks and computation
947 #
948
949 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600950 if len(self.runtaskentries) == 0:
951 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500952 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
953 else:
954 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
955
Brad Bishop316dfdd2018-06-25 12:45:53 -0400956 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500957
958 logger.verbose("Assign Weightings")
959
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600960 self.init_progress_reporter.next_stage()
961
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500962 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600963 for tid in self.runtaskentries:
964 for dep in self.runtaskentries[tid].depends:
965 self.runtaskentries[dep].revdeps.add(tid)
966
967 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500968
969 # Identify tasks at the end of dependency chains
970 # Error on circular dependency loops (length two)
971 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600972 for tid in self.runtaskentries:
973 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500974 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600975 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500976 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600977 if dep in self.runtaskentries[tid].depends:
978 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
979
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500980
981 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
982
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600983 self.init_progress_reporter.next_stage()
984
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985 # Calculate task weights
986 # Check of higher length circular dependencies
987 self.runq_weight = self.calculate_task_weights(endpoints)
988
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600989 self.init_progress_reporter.next_stage()
990
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500991 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600992 for mc in self.dataCaches:
993 prov_list = {}
994 seen_fn = []
995 for tid in self.runtaskentries:
996 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
997 if taskfn in seen_fn:
998 continue
999 if mc != tidmc:
1000 continue
1001 seen_fn.append(taskfn)
1002 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1003 if prov not in prov_list:
1004 prov_list[prov] = [taskfn]
1005 elif taskfn not in prov_list[prov]:
1006 prov_list[prov].append(taskfn)
1007 for prov in prov_list:
1008 if len(prov_list[prov]) < 2:
1009 continue
1010 if prov in self.multi_provider_whitelist:
1011 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001012 seen_pn = []
1013 # If two versions of the same PN are being built its fatal, we don't support it.
1014 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001015 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001016 if pn not in seen_pn:
1017 seen_pn.append(pn)
1018 else:
1019 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001020 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1021 #
1022 # Construct a list of things which uniquely depend on each provider
1023 # since this may help the user figure out which dependency is triggering this warning
1024 #
1025 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1026 deplist = {}
1027 commondeps = None
1028 for provfn in prov_list[prov]:
1029 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001030 for tid in self.runtaskentries:
1031 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001032 if fn != provfn:
1033 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001034 for dep in self.runtaskentries[tid].revdeps:
1035 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001036 if fn == provfn:
1037 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001038 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001039 if not commondeps:
1040 commondeps = set(deps)
1041 else:
1042 commondeps &= deps
1043 deplist[provfn] = deps
1044 for provfn in deplist:
1045 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1046 #
1047 # Construct a list of provides and runtime providers for each recipe
1048 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1049 #
1050 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1051 provide_results = {}
1052 rprovide_results = {}
1053 commonprovs = None
1054 commonrprovs = None
1055 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001056 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001057 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001058 for rprovide in self.dataCaches[mc].rproviders:
1059 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001060 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 for package in self.dataCaches[mc].packages:
1062 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001063 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001064 for package in self.dataCaches[mc].packages_dynamic:
1065 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001066 rprovides.add(package)
1067 if not commonprovs:
1068 commonprovs = set(provides)
1069 else:
1070 commonprovs &= provides
1071 provide_results[provfn] = provides
1072 if not commonrprovs:
1073 commonrprovs = set(rprovides)
1074 else:
1075 commonrprovs &= rprovides
1076 rprovide_results[provfn] = rprovides
1077 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1078 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1079 for provfn in prov_list[prov]:
1080 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1081 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1082
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001083 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001084 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001085 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001086 logger.error(msg)
1087
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001088 self.init_progress_reporter.next_stage()
1089
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001090 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001091 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001092 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001093 self.stampfnwhitelist[mc] = []
1094 for entry in self.stampwhitelist.split():
1095 if entry not in self.taskData[mc].build_targets:
1096 continue
1097 fn = self.taskData.build_targets[entry][0]
1098 self.stampfnwhitelist[mc].append(fn)
1099
1100 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001101
1102 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001103 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001104 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001105 for tid in self.runtaskentries:
1106 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001107 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001108 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001110 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001111
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001112 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001113 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1114 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001115 if fn + ":" + taskname not in taskData[mc].taskentries:
1116 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001117 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1118 if error_nostamp:
1119 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1120 else:
1121 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1122 else:
1123 logger.verbose("Invalidate task %s, %s", taskname, fn)
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001124 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001125
1126 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001127
1128 # Invalidate task if force mode active
1129 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001130 for tid in self.target_tids:
1131 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132
1133 # Invalidate task if invalidate mode active
1134 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 for tid in self.target_tids:
1136 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137 for st in self.cooker.configuration.invalidate_stamp.split(','):
1138 if not st.startswith("do_"):
1139 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001140 invalidate_task(fn + ":" + st, True)
1141
1142 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001143
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001144 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 for mc in taskData:
1146 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1147 virtpnmap = {}
1148 for v in virtmap:
1149 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1150 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1151 if hasattr(bb.parse.siggen, "tasks_resolved"):
1152 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1153
1154 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001155
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001156 # Iterate over the task list and call into the siggen code
1157 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001158 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001159 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001160 for tid in todeal.copy():
1161 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1162 dealtwith.add(tid)
1163 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001164 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001165
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001166 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001167
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001168 #self.dump_data()
1169 return len(self.runtaskentries)
1170
Brad Bishop19323692019-04-05 15:28:33 -04001171 def prepare_task_hash(self, tid):
1172 procdep = []
1173 for dep in self.runtaskentries[tid].depends:
1174 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
1175 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1176 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
1177 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(taskfn + "." + taskname)
1178
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001179 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001180 """
1181 Dump some debug information on the internal data structures
1182 """
1183 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001184 for tid in self.runtaskentries:
1185 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1186 self.runtaskentries[tid].weight,
1187 self.runtaskentries[tid].depends,
1188 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001189
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001190class RunQueueWorker():
1191 def __init__(self, process, pipe):
1192 self.process = process
1193 self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001194
1195class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001196 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001197
1198 self.cooker = cooker
1199 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001200 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001201
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001202 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1203 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001204 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001205
1206 self.state = runQueuePrepare
1207
1208 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001209 # Invoked at regular time intervals via the bitbake heartbeat event
1210 # while the build is running. We generate a unique name for the handler
1211 # here, just in case that there ever is more than one RunQueue instance,
Brad Bishop96ff1982019-08-19 13:50:42 -04001212 # start the handler when reaching runQueueSceneInit, and stop it when
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001213 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001214 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001215 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1216 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001217 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001218 self.worker = {}
1219 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001220
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001221 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001222 logger.debug(1, "Starting bitbake-worker")
1223 magic = "decafbad"
1224 if self.cooker.configuration.profile:
1225 magic = "decafbadbad"
1226 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001227 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001228 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001229 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001230 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001231 env = os.environ.copy()
1232 for key, value in (var.split('=') for var in fakerootenv):
1233 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001234 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001235 else:
1236 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1237 bb.utils.nonblockingfd(worker.stdout)
1238 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1239
1240 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001241 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1242 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1243 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1244 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001245 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001246 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1247 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1248 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1249 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1250 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001251 "buildname" : self.cfgData.getVar("BUILDNAME"),
1252 "date" : self.cfgData.getVar("DATE"),
1253 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001254 }
1255
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001256 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001257 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001258 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001259 worker.stdin.flush()
1260
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001261 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001262
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001263 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001264 if not worker:
1265 return
1266 logger.debug(1, "Teardown for bitbake-worker")
1267 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001268 worker.process.stdin.write(b"<quit></quit>")
1269 worker.process.stdin.flush()
1270 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001271 except IOError:
1272 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001273 while worker.process.returncode is None:
1274 worker.pipe.read()
1275 worker.process.poll()
1276 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001277 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001278 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001279
1280 def start_worker(self):
1281 if self.worker:
1282 self.teardown_workers()
1283 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001284 for mc in self.rqdata.dataCaches:
1285 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001286
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001287 def start_fakeworker(self, rqexec, mc):
1288 if not mc in self.fakeworker:
1289 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001290
1291 def teardown_workers(self):
1292 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001293 for mc in self.worker:
1294 self._teardown_worker(self.worker[mc])
1295 self.worker = {}
1296 for mc in self.fakeworker:
1297 self._teardown_worker(self.fakeworker[mc])
1298 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001299
1300 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001301 for mc in self.worker:
1302 self.worker[mc].pipe.read()
1303 for mc in self.fakeworker:
1304 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001305
1306 def active_fds(self):
1307 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001308 for mc in self.worker:
1309 fds.append(self.worker[mc].pipe.input)
1310 for mc in self.fakeworker:
1311 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001312 return fds
1313
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001314 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001315 def get_timestamp(f):
1316 try:
1317 if not os.access(f, os.F_OK):
1318 return None
1319 return os.stat(f)[stat.ST_MTIME]
1320 except:
1321 return None
1322
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001323 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1324 if taskname is None:
1325 taskname = tn
1326
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001327 if self.stamppolicy == "perfile":
1328 fulldeptree = False
1329 else:
1330 fulldeptree = True
1331 stampwhitelist = []
1332 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001333 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001334
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001335 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001336
1337 # If the stamp is missing, it's not current
1338 if not os.access(stampfile, os.F_OK):
1339 logger.debug(2, "Stampfile %s not available", stampfile)
1340 return False
1341 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001342 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001343 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1344 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1345 return False
1346
1347 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1348 return True
1349
1350 if cache is None:
1351 cache = {}
1352
1353 iscurrent = True
1354 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001355 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001356 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001357 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1358 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1359 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001360 t2 = get_timestamp(stampfile2)
1361 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001362 if t3 and not t2:
1363 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001364 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001365 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001366 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1367 if not t2:
1368 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1369 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001370 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001371 if t1 < t2:
1372 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1373 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001374 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001375 if recurse and iscurrent:
1376 if dep in cache:
1377 iscurrent = cache[dep]
1378 if not iscurrent:
1379 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1380 else:
1381 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1382 cache[dep] = iscurrent
1383 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001384 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001385 return iscurrent
1386
Brad Bishop96ff1982019-08-19 13:50:42 -04001387 def validate_hashes(self, tocheck, data, presentcount=None, siginfo=False):
1388 valid = set()
1389 if self.hashvalidate:
1390 sq_hash = []
1391 sq_hashfn = []
1392 sq_unihash = []
1393 sq_fn = []
1394 sq_taskname = []
1395 sq_task = []
1396 for tid in tocheck:
1397 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1398
1399 sq_fn.append(fn)
1400 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
1401 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
1402 sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
1403 sq_taskname.append(taskname)
1404 sq_task.append(tid)
1405
1406 if presentcount is not None:
1407 data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", presentcount)
1408
1409 valid_ids = self.validate_hash(sq_fn, sq_taskname, sq_hash, sq_hashfn, siginfo, sq_unihash, data, presentcount)
1410
1411 if presentcount is not None:
1412 data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")
1413
1414 for v in valid_ids:
1415 valid.add(sq_task[v])
1416
1417 return valid
1418
    def validate_hash(self, sq_fn, sq_task, sq_hash, sq_hashfn, siginfo, sq_unihash, d, presentcount):
        """Invoke the user-configured hash validation function (self.hashvalidate).

        The function's signature has grown over time, so the newest calling
        convention is tried first and older ones are fallen back to on
        TypeError.  Returns whatever the validation function returns (indices
        of valid entries, per the callers in this file).
        """
        locs = {"sq_fn" : sq_fn, "sq_task" : sq_task, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn,
                "sq_unihash" : sq_unihash, "siginfo" : siginfo, "d" : d}

        # Backwards compatibility
        hashvalidate_args = ("(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo, sq_unihash=sq_unihash)",
                             "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo)",
                             "(sq_fn, sq_task, sq_hash, sq_hashfn, d)")

        for args in hashvalidate_args[:-1]:
            try:
                call = self.hashvalidate + args
                return bb.utils.better_eval(call, locs)
            except TypeError:
                # NOTE(review): this also swallows a TypeError raised *inside*
                # the validation function and silently retries an older
                # signature — unavoidable with this probing scheme.
                continue

        # Call the last entry without a try...catch to propagate any thrown
        # TypeError
        call = self.hashvalidate + hashvalidate_args[-1]
        return bb.utils.better_eval(call, locs)
1439
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001440 def _execute_runqueue(self):
1441 """
1442 Run the tasks in a queue prepared by rqdata.prepare()
1443 Upon failure, optionally try to recover the build using any alternate providers
1444 (if the abort on failure configuration option isn't set)
1445 """
1446
1447 retval = True
1448
1449 if self.state is runQueuePrepare:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001450 # NOTE: if you add, remove or significantly refactor the stages of this
1451 # process then you should recalculate the weightings here. This is quite
1452 # easy to do - just change the next line temporarily to pass debug=True as
1453 # the last parameter and you'll get a printout of the weightings as well
1454 # as a map to the lines where next_stage() was called. Of course this isn't
1455 # critical, but it helps to keep the progress reporting accurate.
1456 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1457 "Initialising tasks",
1458 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001459 if self.rqdata.prepare() == 0:
1460 self.state = runQueueComplete
1461 else:
1462 self.state = runQueueSceneInit
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001463
1464 if self.state is runQueueSceneInit:
Brad Bishop96ff1982019-08-19 13:50:42 -04001465 self.rqdata.init_progress_reporter.next_stage()
1466
1467 # we are ready to run, emit dependency info to any UI or class which
1468 # needs it
1469 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1470 self.rqdata.init_progress_reporter.next_stage()
1471 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
1472
Brad Bishope2d5b612018-11-23 10:55:50 +13001473 if not self.dm_event_handler_registered:
1474 res = bb.event.register(self.dm_event_handler_name,
Brad Bishop96ff1982019-08-19 13:50:42 -04001475 lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
Brad Bishope2d5b612018-11-23 10:55:50 +13001476 ('bb.event.HeartbeatEvent',))
1477 self.dm_event_handler_registered = True
1478
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001479 dump = self.cooker.configuration.dump_signatures
1480 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001481 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001482 if 'printdiff' in dump:
1483 invalidtasks = self.print_diffscenetasks()
1484 self.dump_signatures(dump)
1485 if 'printdiff' in dump:
1486 self.write_diffscenetasks(invalidtasks)
1487 self.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001488
Brad Bishop96ff1982019-08-19 13:50:42 -04001489 if self.state is runQueueSceneInit:
1490 self.rqdata.init_progress_reporter.next_stage()
1491 self.start_worker()
1492 self.rqdata.init_progress_reporter.next_stage()
1493 self.rqexe = RunQueueExecute(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001494
Brad Bishop96ff1982019-08-19 13:50:42 -04001495 # If we don't have any setscene functions, skip execution
1496 if len(self.rqdata.runq_setscene_tids) == 0:
1497 logger.info('No setscene tasks')
1498 for tid in self.rqdata.runtaskentries:
1499 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1500 self.rqexe.setbuildable(tid)
1501 self.rqexe.tasks_notcovered.add(tid)
1502 self.rqexe.sqdone = True
1503 logger.info('Executing Tasks')
1504 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001505
1506 if self.state is runQueueRunning:
1507 retval = self.rqexe.execute()
1508
1509 if self.state is runQueueCleanUp:
1510 retval = self.rqexe.finish()
1511
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001512 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1513
1514 if build_done and self.dm_event_handler_registered:
1515 bb.event.remove(self.dm_event_handler_name, None)
1516 self.dm_event_handler_registered = False
1517
1518 if build_done and self.rqexe:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001519 self.teardown_workers()
Brad Bishop96ff1982019-08-19 13:50:42 -04001520 if self.rqexe:
1521 if self.rqexe.stats.failed:
1522 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1523 else:
1524 # Let's avoid the word "failed" if nothing actually did
1525 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001526
1527 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001528 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001529
1530 if self.state is runQueueComplete:
1531 # All done
1532 return False
1533
1534 # Loop
1535 return retval
1536
1537 def execute_runqueue(self):
1538 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1539 try:
1540 return self._execute_runqueue()
1541 except bb.runqueue.TaskFailure:
1542 raise
1543 except SystemExit:
1544 raise
1545 except bb.BBHandledException:
1546 try:
1547 self.teardown_workers()
1548 except:
1549 pass
1550 self.state = runQueueComplete
1551 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001552 except Exception as err:
1553 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001554 try:
1555 self.teardown_workers()
1556 except:
1557 pass
1558 self.state = runQueueComplete
1559 raise
1560
1561 def finish_runqueue(self, now = False):
1562 if not self.rqexe:
1563 self.state = runQueueComplete
1564 return
1565
1566 if now:
1567 self.rqexe.finish_now()
1568 else:
1569 self.rqexe.finish()
1570
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001571 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001572 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001573 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1574 siggen = bb.parse.siggen
1575 dataCaches = self.rqdata.dataCaches
1576 siggen.dump_sigfn(fn, dataCaches, options)
1577
1578 def dump_signatures(self, options):
1579 fns = set()
1580 bb.note("Reparsing files to collect dependency data")
1581
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001582 for tid in self.rqdata.runtaskentries:
1583 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001584 fns.add(fn)
1585
1586 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1587 # We cannot use the real multiprocessing.Pool easily due to some local data
1588 # that can't be pickled. This is a cheap multi-process solution.
1589 launched = []
1590 while fns:
1591 if len(launched) < max_process:
1592 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1593 p.start()
1594 launched.append(p)
1595 for q in launched:
1596 # The finished processes are joined when calling is_alive()
1597 if not q.is_alive():
1598 launched.remove(q)
1599 for p in launched:
1600 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001601
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001602 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001603
1604 return
1605
    def print_diffscenetasks(self):
        """Report which tasks cannot be reused from the setscene cache.

        Validates the hashes of all non-noexec tasks, then walks the
        dependency graph to find the *earliest* invalid tasks (those whose
        own dependencies are all valid) and prints them.  Returns the set of
        those root invalid tasks.
        """

        noexec = []
        tocheck = set()

        # Partition tasks: noexec tasks are never candidates for cache reuse.
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            tocheck.add(tid)

        valid_new = self.validate_hashes(tocheck, self.cooker.data, None, True)

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk: a tid lands in 'found' when some dependency of
        # it is also invalid, i.e. it is NOT a root cause of the difference.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Once tid is known non-root, stop expanding its tree.
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1669
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid task, locate the closest previously written
        siginfo file and print a recursive diff explaining why the cached
        result could not be used."""

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Compares the siginfo files for two hashes of *key*, recursing
            # into differing dependencies; output is indented per level.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current hash's own file; what remains are candidates
            # for "the closest previous signature".
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                # Newest remaining siginfo file is the closest match; its
                # filename embeds the previous hash (extracted by regex).
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1704
Brad Bishop96ff1982019-08-19 13:50:42 -04001705
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001706class RunQueueExecute:
1707
    def __init__(self, rq):
        """Set up execution state for *rq* and, when setscene tasks exist,
        build the scenequeue dependency data."""
        # Parent RunQueue plus convenience aliases into its state.
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Concurrency limit and scheduler selection from configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene (sq_*) task state: buildable / started / currently executing.
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        # Real runqueue task state.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamp files of currently executing tasks: tid -> stampfile, plus a
        # flat list used elsewhere for collision checks.
        self.build_stamps = {}
        self.build_stamps2 = []
        # tids which have failed.
        self.failed_tids = []
        # Setscene tasks deferred until the tid they map to has completed.
        self.sq_deferred = {}

        # Memoisation cache for check_stamp_task().
        self.stampcache = {}

        # Set once all setscene processing has finished.
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Point the worker pipes back at this executor so task events reach us.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # NOTE(review): name looks like a typo of "coveredtoprocess"; kept
        # as-is since other code references this exact spelling.
        self.coveredtopocess = set()

        # Instantiate the scheduler whose .name matches BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        if len(self.rqdata.runq_setscene_tids) > 0:
            self.sqdata = SQData()
            build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
1769
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001770 def runqueue_process_waitpid(self, task, status):
1771
1772 # self.build_stamps[pid] may not exist when use shared work directory.
1773 if task in self.build_stamps:
1774 self.build_stamps2.remove(self.build_stamps[task])
1775 del self.build_stamps[task]
1776
Brad Bishop96ff1982019-08-19 13:50:42 -04001777 if task in self.sq_live:
1778 if status != 0:
1779 self.sq_task_fail(task, status)
1780 else:
1781 self.sq_task_complete(task)
1782 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001783 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001784 if status != 0:
1785 self.task_fail(task, status)
1786 else:
1787 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001788 return True
1789
1790 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001791 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001792 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001793 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1794 self.rq.worker[mc].process.stdin.flush()
1795 except IOError:
1796 # worker must have died?
1797 pass
1798 for mc in self.rq.fakeworker:
1799 try:
1800 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1801 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001802 except IOError:
1803 # worker must have died?
1804 pass
1805
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001806 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001807 self.rq.state = runQueueFailed
1808 return
1809
1810 self.rq.state = runQueueComplete
1811 return
1812
1813 def finish(self):
1814 self.rq.state = runQueueCleanUp
1815
Brad Bishop96ff1982019-08-19 13:50:42 -04001816 active = self.stats.active + self.sq_stats.active
1817 if active > 0:
1818 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001819 self.rq.read_workers()
1820 return self.rq.active_fds()
1821
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001822 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001823 self.rq.state = runQueueFailed
1824 return True
1825
1826 self.rq.state = runQueueComplete
1827 return True
1828
Brad Bishop96ff1982019-08-19 13:50:42 -04001829 # Used by setscene only
1830 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001831 if not self.rq.depvalidate:
1832 return False
1833
1834 taskdata = {}
1835 taskdeps.add(task)
1836 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001837 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1838 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001839 taskdata[dep] = [pn, taskname, fn]
1840 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001841 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001842 valid = bb.utils.better_eval(call, locs)
1843 return valid
1844
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001845 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001846 active = self.stats.active + self.sq_stats.active
1847 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001848 return can_start
1849
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001850 def get_schedulers(self):
1851 schedulers = set(obj for obj in globals().values()
1852 if type(obj) is type and
1853 issubclass(obj, RunQueueScheduler))
1854
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001855 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001856 if user_schedulers:
1857 for sched in user_schedulers.split():
1858 if not "." in sched:
1859 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1860 continue
1861
1862 modname, name = sched.rsplit(".", 1)
1863 try:
1864 module = __import__(modname, fromlist=(name,))
1865 except ImportError as exc:
1866 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1867 raise SystemExit(1)
1868 else:
1869 schedulers.add(getattr(module, name))
1870 return schedulers
1871
    def setbuildable(self, task):
        """Mark *task* as ready to run and notify the scheduler."""
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001875
1876 def task_completeoutright(self, task):
1877 """
1878 Mark a task as completed
1879 Look at the reverse dependencies and mark any task with
1880 completed dependencies as buildable
1881 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001882 self.runq_complete.add(task)
1883 for revdep in self.rqdata.runtaskentries[task].revdeps:
1884 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001885 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001886 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001887 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001888 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001889 for dep in self.rqdata.runtaskentries[revdep].depends:
1890 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001891 alldeps = False
1892 break
1893 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001894 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001895 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001896
    def task_complete(self, task):
        """Record a successful task: update stats, fire the completion event,
        then propagate buildability to dependents."""
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1901
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # The '' (default) multiconfig taskData carries the abort flag; unless
        # continuing past errors was requested, stop launching new tasks.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1912
    def task_skip(self, task, reason):
        """Mark *task* as skipped for *reason*: it is treated as run and
        complete without ever being executed."""
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        # A skipped task counts as both skipped and completed in the stats.
        self.stats.taskSkipped()
        self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001920
1921 def execute(self):
1922 """
Brad Bishop96ff1982019-08-19 13:50:42 -04001923 Run the tasks in a queue prepared by prepare_runqueue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001924 """
1925
1926 self.rq.read_workers()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001927
Brad Bishop96ff1982019-08-19 13:50:42 -04001928 task = None
1929 if not self.sqdone and self.can_start_task():
1930 # Find the next setscene to run
1931 for nexttask in self.rqdata.runq_setscene_tids:
1932 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
1933 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
1934 if nexttask not in self.rqdata.target_tids:
1935 logger.debug(2, "Skipping setscene for task %s" % nexttask)
1936 self.sq_task_skip(nexttask)
1937 self.scenequeue_notneeded.add(nexttask)
1938 if nexttask in self.sq_deferred:
1939 del self.sq_deferred[nexttask]
1940 return True
1941 if nexttask in self.sq_deferred:
1942 if self.sq_deferred[nexttask] not in self.runq_complete:
1943 continue
1944 logger.debug(1, "Task %s no longer deferred" % nexttask)
1945 del self.sq_deferred[nexttask]
1946 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, None, False)
1947 if not valid:
1948 logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
1949 self.sq_task_failoutright(nexttask)
1950 return True
1951 else:
1952 self.sqdata.outrightfail.remove(nexttask)
1953 if nexttask in self.sqdata.outrightfail:
1954 logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
1955 self.sq_task_failoutright(nexttask)
1956 return True
1957 if nexttask in self.sqdata.unskippable:
1958 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
1959 task = nexttask
1960 break
1961 if task is not None:
1962 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
1963 taskname = taskname + "_setscene"
1964 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
1965 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
1966 self.sq_task_failoutright(task)
1967 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001968
Brad Bishop96ff1982019-08-19 13:50:42 -04001969 if self.cooker.configuration.force:
1970 if task in self.rqdata.target_tids:
1971 self.sq_task_failoutright(task)
1972 return True
1973
1974 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
1975 logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
1976 self.sq_task_skip(task)
1977 return True
1978
1979 if self.cooker.configuration.skipsetscene:
1980 logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
1981 self.sq_task_failoutright(task)
1982 return True
1983
1984 startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
1985 bb.event.fire(startevent, self.cfgData)
1986
1987 taskdepdata = self.sq_build_taskdepdata(task)
1988
1989 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
1990 taskhash = self.rqdata.get_task_hash(task)
1991 unihash = self.rqdata.get_task_unihash(task)
1992 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
1993 if not mc in self.rq.fakeworker:
1994 self.rq.start_fakeworker(self, mc)
1995 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
1996 self.rq.fakeworker[mc].process.stdin.flush()
1997 else:
1998 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
1999 self.rq.worker[mc].process.stdin.flush()
2000
2001 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2002 self.build_stamps2.append(self.build_stamps[task])
2003 self.sq_running.add(task)
2004 self.sq_live.add(task)
2005 self.sq_stats.taskActive()
2006 if self.can_start_task():
2007 return True
2008
2009 if not self.sq_live and not self.sqdone and not self.sq_deferred:
2010 logger.info("Setscene tasks completed")
2011 logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
2012
2013 completeevent = sceneQueueComplete(self.sq_stats, self.rq)
2014 bb.event.fire(completeevent, self.cfgData)
2015
2016 err = False
2017 for x in self.rqdata.runtaskentries:
2018 if x not in self.tasks_covered and x not in self.tasks_notcovered:
2019 logger.error("Task %s was never moved from the setscene queue" % x)
2020 err = True
2021 if x not in self.tasks_scenequeue_done:
2022 logger.error("Task %s was never processed by the setscene code" % x)
2023 err = True
2024 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
2025 logger.error("Task %s was never marked as buildable by the setscene code" % x)
2026 err = True
2027 if err:
2028 self.rq.state = runQueueFailed
2029 return True
2030
2031 if self.cooker.configuration.setsceneonly:
2032 self.rq.state = runQueueComplete
2033 return True
2034 self.sqdone = True
2035
2036 if self.stats.total == 0:
2037 # nothing to do
2038 self.rq.state = runQueueComplete
2039 return True
2040
2041 if self.cooker.configuration.setsceneonly:
2042 task = None
2043 else:
2044 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002045 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002046 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002047
Brad Bishop96ff1982019-08-19 13:50:42 -04002048 if self.rqdata.setscenewhitelist is not None:
2049 if self.check_setscenewhitelist(task):
2050 self.task_fail(task, "setscene whitelist")
2051 return True
2052
2053 if task in self.tasks_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002054 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002055 self.task_skip(task, "covered")
2056 return True
2057
2058 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002059 logger.debug(2, "Stamp current task %s", task)
2060
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002061 self.task_skip(task, "existing")
2062 return True
2063
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002064 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002065 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2066 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2067 noexec=True)
2068 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002069 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002070 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002071 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002072 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002073 self.task_complete(task)
2074 return True
2075 else:
2076 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2077 bb.event.fire(startevent, self.cfgData)
2078
2079 taskdepdata = self.build_taskdepdata(task)
2080
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002081 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002082 taskhash = self.rqdata.get_task_hash(task)
2083 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002084 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002085 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002086 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002087 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002088 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002089 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002090 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002091 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002092 return True
Brad Bishop19323692019-04-05 15:28:33 -04002093 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002094 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002095 else:
Brad Bishop19323692019-04-05 15:28:33 -04002096 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002097 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002098
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002099 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2100 self.build_stamps2.append(self.build_stamps[task])
2101 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002102 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002103 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002104 return True
2105
Brad Bishop96ff1982019-08-19 13:50:42 -04002106 if self.stats.active > 0 or self.sq_stats.active > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002107 self.rq.read_workers()
2108 return self.rq.active_fds()
2109
Brad Bishop96ff1982019-08-19 13:50:42 -04002110 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2111 if self.sq_deferred:
2112 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2113 logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
2114 self.sq_task_failoutright(tid)
2115 return True
2116
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002117 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002118 self.rq.state = runQueueFailed
2119 return True
2120
2121 # Sanity Checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002122 for task in self.rqdata.runtaskentries:
2123 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002124 logger.error("Task %s never buildable!", task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002125 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002126 logger.error("Task %s never ran!", task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002127 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002128 logger.error("Task %s never completed!", task)
2129 self.rq.state = runQueueComplete
2130
2131 return True
2132
Andrew Geissler99467da2019-02-25 18:54:23 -06002133 def filtermcdeps(self, task, deps):
2134 ret = set()
2135 mainmc = mc_from_tid(task)
2136 for dep in deps:
2137 mc = mc_from_tid(dep)
2138 if mc != mainmc:
2139 continue
2140 ret.add(dep)
2141 return ret
2142
2143 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
2144 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002145 def build_taskdepdata(self, task):
2146 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002147 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002148 next.add(task)
Andrew Geissler99467da2019-02-25 18:54:23 -06002149 next = self.filtermcdeps(task, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002150 while next:
2151 additional = []
2152 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002153 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2154 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2155 deps = self.rqdata.runtaskentries[revdep].depends
2156 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002157 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002158 unihash = self.rqdata.runtaskentries[revdep].unihash
Andrew Geissler99467da2019-02-25 18:54:23 -06002159 deps = self.filtermcdeps(task, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002160 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002161 for revdep2 in deps:
2162 if revdep2 not in taskdepdata:
2163 additional.append(revdep2)
2164 next = additional
2165
2166 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2167 return taskdepdata
2168
Brad Bishop96ff1982019-08-19 13:50:42 -04002169 def scenequeue_process_notcovered(self, task):
2170 if len(self.rqdata.runtaskentries[task].depends) == 0:
2171 self.setbuildable(task)
2172 notcovered = set([task])
2173 while notcovered:
2174 new = set()
2175 for t in notcovered:
2176 for deptask in self.rqdata.runtaskentries[t].depends:
2177 if deptask in notcovered or deptask in new or deptask in self.rqdata.runq_setscene_tids or deptask in self.tasks_notcovered:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002178 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002179 logger.debug(1, 'Task %s depends on non-setscene task %s so not skipping' % (t, deptask))
2180 new.add(deptask)
2181 self.tasks_notcovered.add(deptask)
2182 if len(self.rqdata.runtaskentries[deptask].depends) == 0:
2183 self.setbuildable(deptask)
2184 notcovered = new
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002185
Brad Bishop96ff1982019-08-19 13:50:42 -04002186 def scenequeue_process_unskippable(self, task):
2187 # Look up the dependency chain for non-setscene things which depend on this task
2188 # and mark as 'done'/notcovered
2189 ready = set([task])
2190 while ready:
2191 new = set()
2192 for t in ready:
2193 for deptask in self.rqdata.runtaskentries[t].revdeps:
2194 if deptask in ready or deptask in new or deptask in self.tasks_scenequeue_done or deptask in self.rqdata.runq_setscene_tids:
2195 continue
2196 if self.rqdata.runtaskentries[deptask].depends.issubset(self.tasks_scenequeue_done):
2197 new.add(deptask)
2198 self.tasks_scenequeue_done.add(deptask)
2199 self.tasks_notcovered.add(deptask)
2200 #logger.warning("Up: " + str(deptask))
2201 ready = new
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002202
    def scenequeue_updatecounters(self, task, fail=False):
        """
        Called when setscene task *task* has been processed (fail=True when it
        failed or was unavailable).  Updates the scenequeue dependency counts,
        marks newly runnable setscene tasks as buildable and propagates the
        covered/notcovered state through the normal runqueue tasks.
        """
        # Update the setscene tasks which were waiting on this one
        for dep in self.sqdata.sq_deps[task]:
            if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
                # A hard dependency failed, so the dependent setscene task
                # cannot succeed either
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.sq_task_failoutright(dep)
                continue
            if task not in self.sqdata.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sqdata.sq_revdeps2[dep].remove(task)
            if len(self.sqdata.sq_revdeps2[dep]) == 0:
                # All of dep's dependencies are processed, it can run now
                self.sq_buildable.add(dep)

        next = set([task])
        while next:
            new = set()
            for t in next:
                self.tasks_scenequeue_done.add(t)
                # Look down the dependency chain for non-setscene things which this task depends on
                # and mark as 'done'
                for dep in self.rqdata.runtaskentries[t].depends:
                    if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
                        continue
                    if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
                        new.add(dep)
                        #logger.warning(" Down: " + dep)
            next = new

        if task in self.sqdata.unskippable:
            self.scenequeue_process_unskippable(task)

        if task in self.scenequeue_notcovered:
            logger.debug(1, 'Not skipping setscene task %s', task)
            self.scenequeue_process_notcovered(task)
        elif task in self.scenequeue_covered:
            logger.debug(1, 'Queued setscene task %s', task)
            # NOTE(review): attribute name looks like a typo of
            # "coveredtoprocess"; it is defined outside this view so it is
            # deliberately left unchanged here.
            self.coveredtopocess.add(task)

        # Process any queued covered setscene tasks whose covered-task chains
        # are now fully processed.  (Note: this loop rebinds 'task'.)
        for task in self.coveredtopocess.copy():
            if self.sqdata.sq_covered_tasks[task].issubset(self.tasks_scenequeue_done):
                logger.debug(1, 'Processing setscene task %s', task)
                covered = self.sqdata.sq_covered_tasks[task]
                covered.add(task)

                # If a task is in target_tids and isn't a setscene task, we can't skip it.
                cantskip = covered.intersection(self.rqdata.target_tids).difference(self.rqdata.runq_setscene_tids)
                for tid in cantskip:
                    self.tasks_notcovered.add(tid)
                    self.scenequeue_process_notcovered(tid)
                covered.difference_update(cantskip)

                # Remove notcovered tasks
                covered.difference_update(self.tasks_notcovered)
                self.tasks_covered.update(covered)
                self.coveredtopocess.remove(task)
                for tid in covered:
                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
                        self.setbuildable(tid)
2261
2262 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002263 """
2264 Mark a task as completed
2265 Look at the reverse dependencies and mark any task with
2266 completed dependencies as buildable
2267 """
2268
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002269 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002270 self.scenequeue_covered.add(task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002271 self.tasks_covered.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002272 self.scenequeue_updatecounters(task)
2273
Brad Bishop96ff1982019-08-19 13:50:42 -04002274 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002275 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002276 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002277 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2278 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002279 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2280 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2281 self.rq.state = runQueueCleanUp
2282
Brad Bishop96ff1982019-08-19 13:50:42 -04002283 def sq_task_complete(self, task):
2284 self.sq_stats.taskCompleted()
2285 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2286 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002287
Brad Bishop96ff1982019-08-19 13:50:42 -04002288 def sq_task_fail(self, task, result):
2289 self.sq_stats.taskFailed()
2290 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002291 self.scenequeue_notcovered.add(task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002292 self.tasks_notcovered.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002293 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002294 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002295
Brad Bishop96ff1982019-08-19 13:50:42 -04002296 def sq_task_failoutright(self, task):
2297 self.sq_running.add(task)
2298 self.sq_buildable.add(task)
2299 self.sq_stats.taskSkipped()
2300 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002301 self.scenequeue_notcovered.add(task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002302 self.tasks_notcovered.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002303 self.scenequeue_updatecounters(task, True)
2304
Brad Bishop96ff1982019-08-19 13:50:42 -04002305 def sq_task_skip(self, task):
2306 self.sq_running.add(task)
2307 self.sq_buildable.add(task)
2308 self.sq_task_completeoutright(task)
2309 self.sq_stats.taskSkipped()
2310 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002311
Brad Bishop96ff1982019-08-19 13:50:42 -04002312 def sq_build_taskdepdata(self, task):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002313 def getsetscenedeps(tid):
2314 deps = set()
2315 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2316 realtid = tid + "_setscene"
2317 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2318 for (depname, idependtask) in idepends:
2319 if depname not in self.rqdata.taskData[mc].build_targets:
2320 continue
2321
2322 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2323 if depfn is None:
2324 continue
2325 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2326 deps.add(deptid)
2327 return deps
2328
2329 taskdepdata = {}
2330 next = getsetscenedeps(task)
2331 next.add(task)
2332 while next:
2333 additional = []
2334 for revdep in next:
2335 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2336 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2337 deps = getsetscenedeps(revdep)
2338 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2339 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002340 unihash = self.rqdata.runtaskentries[revdep].unihash
2341 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002342 for revdep2 in deps:
2343 if revdep2 not in taskdepdata:
2344 additional.append(revdep2)
2345 next = additional
2346
2347 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2348 return taskdepdata
2349
Brad Bishop96ff1982019-08-19 13:50:42 -04002350 def check_setscenewhitelist(self, tid):
2351 # Check task that is going to run against the whitelist
2352 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2353 # Ignore covered tasks
2354 if tid in self.tasks_covered:
2355 return False
2356 # Ignore stamped tasks
2357 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2358 return False
2359 # Ignore noexec tasks
2360 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2361 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2362 return False
2363
2364 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2365 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2366 if tid in self.rqdata.runq_setscene_tids:
2367 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2368 else:
2369 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
2370 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2371 return True
2372 return False
2373
class SQData(object):
    """
    Container for the state used when processing the setscene (scenequeue)
    portion of the runqueue.  Populated by build_scenequeue_data().
    """
    def __init__(self):
        # SceneQueue dependencies
        self.sq_deps = {}
        # SceneQueue reverse dependencies
        self.sq_revdeps = {}
        # Copy of the reverse dependencies consumed by the sq processing code
        self.sq_revdeps2 = {}
        # Inter-setscene task dependencies injected via [depends] flags
        self.sq_harddeps = {}
        # Stamp files, cached so duplicates can't run in parallel
        self.stamps = {}
        # Setscene tasks the build directly depends upon
        self.unskippable = set()
        # Setscene tasks which are known not to be present
        self.outrightfail = set()
        # For each setscene task, the normal tasks it covers
        self.sq_covered_tasks = {}
2392
def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
    """
    Populate *sqdata* (an SQData instance) with the collapsed setscene task
    dependency graph derived from the full runqueue data in *rqdata*, and
    prime *sqrq* with the initially buildable/skippable setscene tasks.
    When hash validation is enabled (rq.hashvalidate), availability of the
    setscene objects is also checked here and missing ones are recorded in
    sqdata.outrightfail.
    """

    sq_revdeps = {}
    sq_revdeps_squash = {}
    sq_collated_deps = {}

    # We need to construct a dependency graph for the setscene functions. Intermediate
    # dependencies between the setscene tasks only complicate the code. This code
    # therefore aims to collapse the huge runqueue dependency tree into a smaller one
    # only containing the setscene functions.

    rqdata.init_progress_reporter.next_stage()

    # First process the chains up to the first setscene task.
    endpoints = {}
    for tid in rqdata.runtaskentries:
        sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
        sq_revdeps_squash[tid] = set()
        if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint %s" % (tid))
            endpoints[tid] = set()

    rqdata.init_progress_reporter.next_stage()

    # Secondly process the chains between setscene tasks.
    for tid in rqdata.runq_setscene_tids:
        sq_collated_deps[tid] = set()
        #bb.warn("Added endpoint 2 %s" % (tid))
        for dep in rqdata.runtaskentries[tid].depends:
            if tid in sq_revdeps[dep]:
                sq_revdeps[dep].remove(tid)
            if dep not in endpoints:
                endpoints[dep] = set()
            #bb.warn("  Added endpoint 3 %s" % (dep))
            endpoints[dep].add(tid)

    rqdata.init_progress_reporter.next_stage()

    def process_endpoints(endpoints):
        # Recursively collapse chains of non-setscene tasks: the set of
        # setscene tasks reaching each endpoint is pushed down its
        # dependencies, and each collapsed point is recorded in
        # sq_collated_deps as covered by those setscene tasks.
        newendpoints = {}
        for point, task in endpoints.items():
            tasks = set()
            if task:
                tasks |= task
            if sq_revdeps_squash[point]:
                tasks |= sq_revdeps_squash[point]
            if point not in rqdata.runq_setscene_tids:
                for t in tasks:
                    sq_collated_deps[t].add(point)
            sq_revdeps_squash[point] = set()
            if point in rqdata.runq_setscene_tids:
                sq_revdeps_squash[point] = tasks
                tasks = set()
                continue
            for dep in rqdata.runtaskentries[point].depends:
                if point in sq_revdeps[dep]:
                    sq_revdeps[dep].remove(point)
                if tasks:
                    sq_revdeps_squash[dep] |= tasks
                if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
                    newendpoints[dep] = task
        if len(newendpoints) != 0:
            process_endpoints(newendpoints)

    process_endpoints(endpoints)

    rqdata.init_progress_reporter.next_stage()

    # Build a list of setscene tasks which are "unskippable"
    # These are direct endpoints referenced by the build
    # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
    new = True
    for tid in rqdata.runtaskentries:
        if len(rqdata.runtaskentries[tid].revdeps) == 0:
            sqdata.unskippable.add(tid)
    while new:
        new = False
        for tid in sqdata.unskippable.copy():
            if tid in rqdata.runq_setscene_tids:
                continue
            sqdata.unskippable.remove(tid)
            if len(rqdata.runtaskentries[tid].depends) == 0:
                # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
                sqrq.tasks_notcovered.add(tid)
                sqrq.tasks_scenequeue_done.add(tid)
                sqrq.setbuildable(tid)
                sqrq.scenequeue_process_unskippable(tid)
            sqdata.unskippable |= rqdata.runtaskentries[tid].depends
            new = True

    rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))

    # Sanity check all dependencies could be changed to setscene task references
    for taskcounter, tid in enumerate(rqdata.runtaskentries):
        if tid in rqdata.runq_setscene_tids:
            pass
        elif len(sq_revdeps_squash[tid]) != 0:
            bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
        else:
            del sq_revdeps_squash[tid]
        rqdata.init_progress_reporter.update(taskcounter)

    rqdata.init_progress_reporter.next_stage()

    # Resolve setscene inter-task dependencies
    # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
    # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
    for tid in rqdata.runq_setscene_tids:
        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
        realtid = tid + "_setscene"
        idepends = rqdata.taskData[mc].taskentries[realtid].idepends
        sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
        for (depname, idependtask) in idepends:

            if depname not in rqdata.taskData[mc].build_targets:
                continue

            depfn = rqdata.taskData[mc].build_targets[depname][0]
            if depfn is None:
                continue
            deptid = depfn + ":" + idependtask.replace("_setscene", "")
            if deptid not in rqdata.runtaskentries:
                bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

            if not deptid in sqdata.sq_harddeps:
                sqdata.sq_harddeps[deptid] = set()
            sqdata.sq_harddeps[deptid].add(tid)

            sq_revdeps_squash[tid].add(deptid)
            # Have to zero this to avoid circular dependencies
            sq_revdeps_squash[deptid] = set()

    rqdata.init_progress_reporter.next_stage()

    for task in sqdata.sq_harddeps:
        for dep in sqdata.sq_harddeps[task]:
            sq_revdeps_squash[dep].add(task)

    rqdata.init_progress_reporter.next_stage()

    #for tid in sq_revdeps_squash:
    #    data = ""
    #    for dep in sq_revdeps_squash[tid]:
    #        data = data + "\n   %s" % dep
    #    bb.warn("Task %s_setscene: is %s " % (tid, data))

    sqdata.sq_revdeps = sq_revdeps_squash
    sqdata.sq_revdeps2 = copy.deepcopy(sqdata.sq_revdeps)
    sqdata.sq_covered_tasks = sq_collated_deps

    # Build reverse version of revdeps to populate deps structure
    for tid in sqdata.sq_revdeps:
        sqdata.sq_deps[tid] = set()
    for tid in sqdata.sq_revdeps:
        for dep in sqdata.sq_revdeps[tid]:
            sqdata.sq_deps[dep].add(tid)

    rqdata.init_progress_reporter.next_stage()

    # Setscene tasks with no setscene dependencies can start immediately
    multiconfigs = set()
    for tid in sqdata.sq_revdeps:
        multiconfigs.add(mc_from_tid(tid))
        if len(sqdata.sq_revdeps[tid]) == 0:
            sqrq.sq_buildable.add(tid)

    rqdata.init_progress_reporter.finish()

    if rq.hashvalidate:
        noexec = []
        stamppresent = []
        tocheck = set()

        for tid in sqdata.sq_revdeps:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

            taskdep = rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # Nothing to restore for noexec tasks; just create the stamp
                noexec.append(tid)
                sqrq.sq_task_skip(tid)
                bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
                continue

            if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
                logger.debug(2, 'Setscene stamp current for task %s', tid)
                stamppresent.append(tid)
                sqrq.sq_task_skip(tid)
                continue

            if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
                logger.debug(2, 'Normal stamp current for task %s', tid)
                stamppresent.append(tid)
                sqrq.sq_task_skip(tid)
                continue

            tocheck.add(tid)

        valid = rq.validate_hashes(tocheck, cooker.data, len(stamppresent), False)

        valid_new = stamppresent
        for v in valid:
            valid_new.append(v)

        hashes = {}
        for mc in sorted(multiconfigs):
            for tid in sqdata.sq_revdeps:
                if mc_from_tid(tid) != mc:
                    continue
                if tid not in valid_new and tid not in noexec and tid not in sqrq.scenequeue_notcovered:
                    sqdata.outrightfail.add(tid)

                # Defer any task whose hash index matches an already-seen
                # task so the two don't run at once (presumably they would
                # produce the same output -- verify pending_hash_index()
                # semantics, defined elsewhere in this file)
                h = pending_hash_index(tid, rqdata)
                if h not in hashes:
                    hashes[h] = tid
                else:
                    sqrq.sq_deferred[tid] = hashes[h]
                    bb.warn("Deferring %s after %s" % (tid, hashes[h]))
2610
2611
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails; failure details are held in args."""

    def __init__(self, x):
        self.args = x
2618
2619
class runQueueExitWait(bb.event.Event):
    """Event fired while waiting for outstanding task processes to exit."""

    def __init__(self, remain):
        # remain is the number of tasks still active
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)
2629
class runQueueEvent(bb.event.Event):
    """
    Base class for runqueue events; records the task's identifiers,
    its hash and a snapshot of the queue statistics.
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = task
        self.taskfile = fn_from_tid(task)
        self.taskname = taskname_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2642
class sceneQueueEvent(runQueueEvent):
    """
    Base class for scenequeue events; as runQueueEvent, but with the task
    string and name adjusted to the _setscene variant.
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002653
class runQueueTaskStarted(runQueueEvent):
    """Fired when execution of a task begins."""
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # noexec tasks fire this event but nothing is actually executed
        self.noexec = noexec
2661
class sceneQueueTaskStarted(sceneQueueEvent):
    """Fired when execution of a setscene task begins."""
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        # noexec tasks fire this event but nothing is actually executed
        self.noexec = noexec
2669
class runQueueTaskFailed(runQueueEvent):
    """Fired when a task exits unsuccessfully."""
    def __init__(self, task, stats, exitcode, rq):
        self.exitcode = exitcode
        runQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2680
class sceneQueueTaskFailed(sceneQueueEvent):
    """Fired when a setscene task exits unsuccessfully."""
    def __init__(self, task, stats, exitcode, rq):
        self.exitcode = exitcode
        sceneQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2691
class sceneQueueComplete(sceneQueueEvent):
    """Fired once all sceneQueue tasks have completed."""
    def __init__(self, stats, rq):
        # Deliberately bypasses sceneQueueEvent.__init__ since this event
        # is not associated with a single task
        bb.event.Event.__init__(self)
        self.stats = stats.copy()
2699
class runQueueTaskCompleted(runQueueEvent):
    """Fired when a task has completed."""
2704
class sceneQueueTaskCompleted(sceneQueueEvent):
    """Fired when a setscene task has completed."""
2709
class runQueueTaskSkipped(runQueueEvent):
    """Fired when a task is skipped rather than executed."""
    def __init__(self, task, stats, rq, reason):
        # reason is a short string such as "covered" or "existing"
        self.reason = reason
        runQueueEvent.__init__(self, task, stats, rq)
2717
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    Workers stream two kinds of framed, pickled payloads back over the pipe:
      <event>...</event>       - a bitbake event to re-fire on the server side
      <exitcode>...</exitcode> - a (task, status) pair for a finished task
    read() drains the pipe (non-blocking) and dispatches complete frames,
    keeping any trailing partial frame buffered in self.queue.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # pipein: read end used by the server; pipeout: worker's write end,
        # closed here since this (server) process must not hold it open.
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Buffer of bytes read so far that do not yet form a complete frame.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Allow the executor to be swapped (e.g. moving from setscene to
        # real-task execution) without recreating the pipe.
        self.rqexec = rqexec

    def read(self):
        """Drain available pipe data and dispatch any complete frames.

        Returns True if any new bytes were read (callers loop on this).
        Also polls the worker processes and tears the runqueue down if one
        has died unexpectedly.
        """
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                # A worker exiting while we are not already tearing down is fatal.
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data right now (fd is non-blocking).
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep scanning until a pass finds no complete frame of either kind.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # Payload sits between "<event>" (7 bytes) and "</event>".
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                # Drop the consumed frame including "</event>" (8 bytes).
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    # Payload sits between "<exitcode>" (10 bytes) and "</exitcode>".
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # Drop the consumed frame including "</exitcode>" (11 bytes).
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain anything the worker wrote before the pipe is shut down.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002781
def get_setscene_enforce_whitelist(d):
    """Return the expanded BB_SETSCENE_ENFORCE_WHITELIST entries, or None.

    Returns None when BB_SETSCENE_ENFORCE is not enabled.  Entries of the
    form '%:taskname' are expanded against each non-option command-line
    target, producing one 'target:taskname' entry per target; all other
    entries pass through unchanged.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None

    outlist = []
    for item in (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split():
        if not item.startswith('%:'):
            outlist.append(item)
            continue
        taskname = item.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                outlist.append(target.split(':')[0] + ':' + taskname)
    return outlist
2795
2796def check_setscene_enforce_whitelist(pn, taskname, whitelist):
2797 import fnmatch
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002798 if whitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002799 item = '%s:%s' % (pn, taskname)
2800 for whitelist_item in whitelist:
2801 if fnmatch.fnmatch(item, whitelist_item):
2802 return True
2803 return False
2804 return True