blob: addb2bb82fd7214dc434eb66180a895e1385b3cf [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001"""
2BitBake 'RunQueue' implementation
3
4Handles preparation and execution of a queue of tasks
5"""
6
7# Copyright (C) 2006-2007 Richard Purdie
8#
Brad Bishopc342db32019-05-15 21:57:59 -04009# SPDX-License-Identifier: GPL-2.0-only
Patrick Williamsc124f4f2015-09-15 14:41:29 -050010#
Patrick Williamsc124f4f2015-09-15 14:41:29 -050011
12import copy
13import os
14import sys
15import signal
16import stat
17import fcntl
18import errno
19import logging
20import re
21import bb
22from bb import msg, data, event
23from bb import monitordisk
24import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060025import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050026from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040027import shlex
Brad Bishop96ff1982019-08-19 13:50:42 -040028import pprint
Patrick Williamsc124f4f2015-09-15 14:41:29 -050029
# Module loggers: "BitBake" is the parent logger; this module logs under
# the "BitBake.RunQueue" child.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 64-character hex string (a sha256-sized hash),
# case-insensitively, that is not embedded in a longer alphanumeric run.
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050034
def fn_from_tid(tid):
    """Return the filename part of a task id (everything before the last colon)."""
    filename, _taskname = tid.rsplit(":", 1)
    return filename
37
def taskname_from_tid(tid):
    """Return the task name part of a task id (everything after the last colon)."""
    _filename, taskname = tid.rsplit(":", 1)
    return taskname
40
def mc_from_tid(tid):
    """Return the multiconfig name embedded in tid, or "" for the default config."""
    if not tid.startswith('mc:'):
        return ""
    return tid.split(':')[1]
45
def split_tid(tid):
    """Split tid into (multiconfig, filename, taskname), dropping the mcfn form."""
    mc, fn, taskname, _mcfn = split_tid_mcfn(tid)
    return (mc, fn, taskname)
49
def split_tid_mcfn(tid):
    """Decompose a task id into (mc, fn, taskname, mcfn).

    Plain task ids look like "<fn>:<taskname>"; multiconfig ones look like
    "mc:<mc>:<fn>:<taskname>".  For the plain form mc is "" and mcfn == fn.
    """
    if tid.startswith('mc:'):
        parts = tid.split(':')
        mc = parts[1]
        taskname = parts[-1]
        # fn may itself contain colons, so rejoin the middle pieces.
        fn = ":".join(parts[2:-1])
        mcfn = "mc:" + mc + ":" + fn
        return (mc, fn, taskname, mcfn)

    fn, taskname = tid.rsplit(":", 1)
    return ("", fn, taskname, fn)
65
def build_tid(mc, fn, taskname):
    """Construct a task id; non-empty mc produces the "mc:<mc>:..." form."""
    if not mc:
        return fn + ":" + taskname
    return ":".join(["mc", mc, fn, taskname])
70
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """Return a key grouping tasks by (PN, taskname, hash) so potentially
    equivalent multiconfig tasks can be paired up."""
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].hash
    # Bug fix: the original concatenated the literal string "taskname"
    # instead of the task's actual name, so tasks were effectively matched
    # on PN and hash only, contrary to the comment above.  Use the real
    # task name, with a separator so the key parses unambiguously.
    return pn + ":" + taskname + ":" + h
78
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for the lifecycle of tasks; total is fixed at creation.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a duplicate carrying the same counter values."""
        clone = self.__class__(self.total)
        clone.__dict__.update(self.__dict__)
        return clone

    def taskFailed(self):
        """Record an active task as failed."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        """Record an active task as completed."""
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        """Record a skipped task; skipped tasks still enter the active count."""
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        """Record a task becoming active."""
        self.active += 1
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
# NOTE(review): values 4 and 5 are unassigned here - presumably removed
# states; the gap keeps the remaining values stable.
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Bug fix: the original wrapped the keys *view* in a one-element
        # list ([dict_keys]), which broke self.prio_map.index(tid) when
        # building rev_prio_map below.  Materialise a real list of tids.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                # Bug fix: buildable is a set, so use add(); the original
                # called append(), which raises AttributeError on a set.
                self.buildable.add(tid)

        # Lazily built map of tid -> priority index (see next_buildable_task).
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Work on a copy so scheduler state isn't mutated; drop tasks that
        # are already running, held off, or not yet covered/notcovered.
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.runq_running)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            # Fast path: a single candidate needs no priority search.
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        # Build the tid -> priority index map once, on first use.
        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the lowest-priority-number (most important) candidate whose
        # stamp isn't already being built.
        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Mark task as buildable."""
        self.buildable.add(task)

    def removebuildable(self, task):
        """Remove task from the buildable set (it must be present)."""
        self.buildable.remove(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of taskid for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Log the whole priority map (most important first) at debug level 3."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
225
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket task ids by their weight.
        by_weight = {}
        for tid in self.rqdata.runtaskentries:
            by_weight.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # Emit buckets in ascending weight order, then flip the whole map so
        # the heaviest tasks come first.
        self.prio_map = [tid for wkey in sorted(by_weight) for tid in by_weight[wkey]]
        self.prio_map.reverse()
252
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        # Start from the speed scheduler's weight-sorted priority map and
        # reorder it so recipes are finished one at a time.
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        # Stable partition of prio_map: for each task kind (in merged order)
        # pull all matching tids forward, preserving their relative order.
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500349
class RunTaskEntry(object):
    """A node in the runqueue graph: dependency links plus per-task data."""

    def __init__(self):
        # Forward and reverse dependency edges (sets of tids).
        self.depends, self.revdeps = set(), set()
        # Signature hashes and task reference; populated later.
        self.hash = self.unihash = self.task = None
        # Scheduling weight; recalculated by calculate_task_weights().
        self.weight = 1
358
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500359class RunQueueData:
360 """
361 BitBake Run Queue implementation
362 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Capture references to the cooker/config/taskdata state and read the
        configuration variables controlling runqueue behaviour.

        Args:
            rq: the owning RunQueue instance
            cooker: the cooker driving this build
            cfgData: configuration datastore (queried for BB_* variables)
            dataCaches: per-multiconfig recipe data caches
            taskData: per-multiconfig TaskData instances
            targets: the build targets requested
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        # NOTE(review): presumably set when multiple .bb files trigger a
        # multi-provider warning - set elsewhere; confirm against callers.
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        # True when BB_SETSCENE_ENFORCE is exactly "1".
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        # reset() initialises self.runtaskentries.
        self.reset()
379
380 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600381 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500382
383 def runq_depends_names(self, ids):
384 import re
385 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600386 for id in ids:
387 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500388 nam = re.sub("_[^,]*,", ",", nam)
389 ret.extend([nam])
390 return ret
391
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600392 def get_task_hash(self, tid):
393 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500394
Brad Bishop19323692019-04-05 15:28:33 -0400395 def get_task_unihash(self, tid):
396 return self.runtaskentries[tid].unihash
397
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600398 def get_user_idstring(self, tid, task_name_suffix = ""):
399 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500400
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500401 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500402 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
403 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600404 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500405 return "%s:%s" % (pn, taskname)
406
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing up to 10 dependency loops.
        """
        # NOTE(review): deepcopy is imported here but the code below uses
        # copy.deepcopy (module-level import); this local import appears unused.
        from copy import deepcopy

        # Chains already reported (canonicalised), memoised reverse-dependency
        # closures, and the output messages.
        valid_chains = []
        explored_deps = {}
        msgs = []

        # Raised to abort the search once enough loops have been reported.
        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            """Depth-first walk of reverse dependencies from tid, recording
            any loop back into prev_chain as a new dependency chain."""
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    # Not seen yet - must be explored.
                    scan = True
                elif revdep in explored_deps[revdep]:
                    # Known self-referencing loop - re-explore to report it.
                    scan = True
                else:
                    # Re-explore if any task on the current chain appears in
                    # revdep's previously computed closure.
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    # Recurse with a copied chain so siblings aren't polluted.
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
499
500 def calculate_task_weights(self, endpoints):
501 """
502 Calculate a number representing the "weight" of each task. Heavier weighted tasks
503 have more dependencies and hence should be executed sooner for maximum speed.
504
505 This function also sanity checks the task list finding tasks that are not
506 possible to execute due to circular dependencies.
507 """
508
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600509 numTasks = len(self.runtaskentries)
510 weight = {}
511 deps_left = {}
512 task_done = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500513
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600514 for tid in self.runtaskentries:
515 task_done[tid] = False
516 weight[tid] = 1
517 deps_left[tid] = len(self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500518
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600519 for tid in endpoints:
520 weight[tid] = 10
521 task_done[tid] = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500522
523 while True:
524 next_points = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600525 for tid in endpoints:
526 for revdep in self.runtaskentries[tid].depends:
527 weight[revdep] = weight[revdep] + weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500528 deps_left[revdep] = deps_left[revdep] - 1
529 if deps_left[revdep] == 0:
530 next_points.append(revdep)
531 task_done[revdep] = True
532 endpoints = next_points
533 if len(next_points) == 0:
534 break
535
536 # Circular dependency sanity check
537 problem_tasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600538 for tid in self.runtaskentries:
539 if task_done[tid] is False or deps_left[tid] != 0:
540 problem_tasks.append(tid)
541 logger.debug(2, "Task %s is not buildable", tid)
542 logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
543 self.runtaskentries[tid].weight = weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500544
545 if problem_tasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600546 message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500547 message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
548 message = message + "Identifying dependency loops (this may take a short while)...\n"
549 logger.error(message)
550
551 msgs = self.circular_depchains_handler(problem_tasks)
552
553 message = "\n"
554 for msg in msgs:
555 message = message + msg
556 bb.msg.fatal("RunQueue", message)
557
558 return weight
559
560 def prepare(self):
561 """
562 Turn a set of taskData into a RunQueue and compute data needed
563 to optimise the execution order.
564 """
565
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600566 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500567 recursivetasks = {}
568 recursiveitasks = {}
569 recursivetasksselfref = set()
570
571 taskData = self.taskData
572
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600573 found = False
574 for mc in self.taskData:
575 if len(taskData[mc].taskentries) > 0:
576 found = True
577 break
578 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500579 # Nothing to do
580 return 0
581
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600582 self.init_progress_reporter.start()
583 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500584
585 # Step A - Work out a list of tasks to run
586 #
587 # Taskdata gives us a list of possible providers for every build and run
588 # target ordered by priority. It also gives information on each of those
589 # providers.
590 #
591 # To create the actual list of tasks to execute we fix the list of
592 # providers and then resolve the dependencies into task IDs. This
593 # process is repeated for each type of dependency (tdepends, deptask,
594 # rdeptast, recrdeptask, idepends).
595
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 def add_build_dependencies(depids, tasknames, depends, mc):
597 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500598 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600599 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500600 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600601 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500602 if depdata is None:
603 continue
604 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600605 t = depdata + ":" + taskname
606 if t in taskData[mc].taskentries:
607 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500608
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600609 def add_runtime_dependencies(depids, tasknames, depends, mc):
610 for depname in depids:
611 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500612 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600613 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614 if depdata is None:
615 continue
616 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600617 t = depdata + ":" + taskname
618 if t in taskData[mc].taskentries:
619 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500620
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800621 def add_mc_dependencies(mc, tid):
622 mcdeps = taskData[mc].get_mcdepends()
623 for dep in mcdeps:
624 mcdependency = dep.split(':')
625 pn = mcdependency[3]
626 frommc = mcdependency[1]
627 mcdep = mcdependency[2]
628 deptask = mcdependency[4]
629 if mc == frommc:
630 fn = taskData[mcdep].build_targets[pn][0]
631 newdep = '%s:%s' % (fn,deptask)
632 taskData[mc].taskentries[tid].tdepends.append(newdep)
633
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600634 for mc in taskData:
635 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500636
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600637 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
638 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500639
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600640 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
641
642 depends = set()
643 task_deps = self.dataCaches[mc].task_deps[taskfn]
644
645 self.runtaskentries[tid] = RunTaskEntry()
646
647 if fn in taskData[mc].failed_fns:
648 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800650 # We add multiconfig dependencies before processing internal task deps (tdepends)
651 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
652 add_mc_dependencies(mc, tid)
653
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500654 # Resolve task internal dependencies
655 #
656 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600657 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800658 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
659 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500660
661 # Resolve 'deptask' dependencies
662 #
663 # e.g. do_sometask[deptask] = "do_someothertask"
664 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600665 if 'deptask' in task_deps and taskname in task_deps['deptask']:
666 tasknames = task_deps['deptask'][taskname].split()
667 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
669 # Resolve 'rdeptask' dependencies
670 #
671 # e.g. do_sometask[rdeptask] = "do_someothertask"
672 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600673 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
674 tasknames = task_deps['rdeptask'][taskname].split()
675 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500676
677 # Resolve inter-task dependencies
678 #
679 # e.g. do_sometask[depends] = "targetname:do_someothertask"
680 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600681 idepends = taskData[mc].taskentries[tid].idepends
682 for (depname, idependtask) in idepends:
683 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500684 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600685 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500686 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600687 t = depdata + ":" + idependtask
688 depends.add(t)
689 if t not in taskData[mc].taskentries:
690 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
691 irdepends = taskData[mc].taskentries[tid].irdepends
692 for (depname, idependtask) in irdepends:
693 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500694 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500695 if not taskData[mc].run_targets[depname]:
696 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600697 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600699 t = depdata + ":" + idependtask
700 depends.add(t)
701 if t not in taskData[mc].taskentries:
702 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500703
704 # Resolve recursive 'recrdeptask' dependencies (Part A)
705 #
706 # e.g. do_sometask[recrdeptask] = "do_someothertask"
707 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
708 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600709 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
710 tasknames = task_deps['recrdeptask'][taskname].split()
711 recursivetasks[tid] = tasknames
712 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
713 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
714 if taskname in tasknames:
715 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500716
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600717 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
718 recursiveitasks[tid] = []
719 for t in task_deps['recideptask'][taskname].split():
720 newdep = build_tid(mc, fn, t)
721 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500722
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600723 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400724 # Remove all self references
725 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500726
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600727 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500728
Brad Bishop316dfdd2018-06-25 12:45:53 -0400729 self.init_progress_reporter.next_stage()
730
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500731 # Resolve recursive 'recrdeptask' dependencies (Part B)
732 #
733 # e.g. do_sometask[recrdeptask] = "do_someothertask"
734 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600735 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600736
Brad Bishop316dfdd2018-06-25 12:45:53 -0400737 # Generating/interating recursive lists of dependencies is painful and potentially slow
738 # Precompute recursive task dependencies here by:
739 # a) create a temp list of reverse dependencies (revdeps)
740 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
741 # c) combine the total list of dependencies in cumulativedeps
742 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500743
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500744
Brad Bishop316dfdd2018-06-25 12:45:53 -0400745 revdeps = {}
746 deps = {}
747 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600748 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400749 deps[tid] = set(self.runtaskentries[tid].depends)
750 revdeps[tid] = set()
751 cumulativedeps[tid] = set()
752 # Generate a temp list of reverse dependencies
753 for tid in self.runtaskentries:
754 for dep in self.runtaskentries[tid].depends:
755 revdeps[dep].add(tid)
756 # Find the dependency chain endpoints
757 endpoints = set()
758 for tid in self.runtaskentries:
759 if len(deps[tid]) == 0:
760 endpoints.add(tid)
761 # Iterate the chains collating dependencies
762 while endpoints:
763 next = set()
764 for tid in endpoints:
765 for dep in revdeps[tid]:
766 cumulativedeps[dep].add(fn_from_tid(tid))
767 cumulativedeps[dep].update(cumulativedeps[tid])
768 if tid in deps[dep]:
769 deps[dep].remove(tid)
770 if len(deps[dep]) == 0:
771 next.add(dep)
772 endpoints = next
773 #for tid in deps:
774 # if len(deps[tid]) != 0:
775 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
776
777 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
778 # resolve these recursively until we aren't adding any further extra dependencies
779 extradeps = True
780 while extradeps:
781 extradeps = 0
782 for tid in recursivetasks:
783 tasknames = recursivetasks[tid]
784
785 totaldeps = set(self.runtaskentries[tid].depends)
786 if tid in recursiveitasks:
787 totaldeps.update(recursiveitasks[tid])
788 for dep in recursiveitasks[tid]:
789 if dep not in self.runtaskentries:
790 continue
791 totaldeps.update(self.runtaskentries[dep].depends)
792
793 deps = set()
794 for dep in totaldeps:
795 if dep in cumulativedeps:
796 deps.update(cumulativedeps[dep])
797
798 for t in deps:
799 for taskname in tasknames:
800 newtid = t + ":" + taskname
801 if newtid == tid:
802 continue
803 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
804 extradeps += 1
805 self.runtaskentries[tid].depends.add(newtid)
806
807 # Handle recursive tasks which depend upon other recursive tasks
808 deps = set()
809 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
810 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
811 for newtid in deps:
812 for taskname in tasknames:
813 if not newtid.endswith(":" + taskname):
814 continue
815 if newtid in self.runtaskentries:
816 extradeps += 1
817 self.runtaskentries[tid].depends.add(newtid)
818
819 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
820
821 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
822 for tid in recursivetasksselfref:
823 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600824
825 self.init_progress_reporter.next_stage()
826
827 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500828
829 # Step B - Mark all active tasks
830 #
831 # Start with the tasks we were asked to run and mark all dependencies
832 # as active too. If the task is to be 'forced', clear its stamp. Once
833 # all active tasks are marked, prune the ones we don't need.
834
835 logger.verbose("Marking Active Tasks")
836
        def mark_active(tid, depth):
            """
            Mark a task id as active (needed for the build) along with all of
            its dependencies, recursively.

            Results are recorded in the enclosing scope's runq_build dict;
            the depth argument is carried for the recursion but not otherwise
            used here.
            """

            # Already visited: stop here. This also terminates the recursion
            # on any dependency cycles.
            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)
        def invalidate_task(tid, error_nostamp):
            """
            Force the task identified by tid to re-run by invalidating its
            stamp/signature via the signature generator.

            error_nostamp controls whether hitting a 'nostamp' task is fatal
            (True) or merely logged (False).
            """
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            # Warn (but continue) if the task isn't known to taskData at all.
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            # 'nostamp' tasks always run, so there is no stamp to invalidate.
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600866 self.target_tids = []
867 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500868
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600869 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500870 continue
871
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600872 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500873 continue
874
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500875 parents = False
876 if task.endswith('-'):
877 parents = True
878 task = task[:-1]
879
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600880 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500881 continue
882
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600883 # fn already has mc prefix
884 tid = fn + ":" + task
885 self.target_tids.append(tid)
886 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500887 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600888 tasks = []
889 for x in taskData[mc].taskentries:
890 if x.startswith(fn + ":"):
891 tasks.append(taskname_from_tid(x))
892 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500893 if close_matches:
894 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
895 else:
896 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600897 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
898
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500899 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500900 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600901 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500902 mark_active(i, 1)
903 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600904 mark_active(tid, 1)
905
906 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500907
908 # Step C - Prune all inactive tasks
909 #
910 # Once all active tasks are marked, prune the ones we don't need.
911
Brad Bishop316dfdd2018-06-25 12:45:53 -0400912 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600913 for tid in list(self.runtaskentries.keys()):
914 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400915 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600916 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600917
Brad Bishop316dfdd2018-06-25 12:45:53 -0400918 # Handle --runall
919 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500920 # re-run the mark_active and then drop unused tasks from new list
921 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400922
923 for task in self.cooker.configuration.runall:
924 runall_tids = set()
925 for tid in list(self.runtaskentries):
926 wanttid = fn_from_tid(tid) + ":do_%s" % task
927 if wanttid in delcount:
928 self.runtaskentries[wanttid] = delcount[wanttid]
929 if wanttid in self.runtaskentries:
930 runall_tids.add(wanttid)
931
932 for tid in list(runall_tids):
933 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400934 if self.cooker.configuration.force:
935 invalidate_task(tid, False)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500936
937 for tid in list(self.runtaskentries.keys()):
938 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400939 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500940 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500941
942 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400943 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
944
945 self.init_progress_reporter.next_stage()
946
947 # Handle runonly
948 if self.cooker.configuration.runonly:
949 # re-run the mark_active and then drop unused tasks from new list
950 runq_build = {}
951
952 for task in self.cooker.configuration.runonly:
953 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
954
955 for tid in list(runonly_tids):
956 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400957 if self.cooker.configuration.force:
958 invalidate_task(tid, False)
Brad Bishop316dfdd2018-06-25 12:45:53 -0400959
960 for tid in list(self.runtaskentries.keys()):
961 if tid not in runq_build:
962 delcount[tid] = self.runtaskentries[tid]
963 del self.runtaskentries[tid]
964
965 if len(self.runtaskentries) == 0:
966 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500967
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500968 #
969 # Step D - Sanity checks and computation
970 #
971
972 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600973 if len(self.runtaskentries) == 0:
974 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500975 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
976 else:
977 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
978
Brad Bishop316dfdd2018-06-25 12:45:53 -0400979 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500980
981 logger.verbose("Assign Weightings")
982
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600983 self.init_progress_reporter.next_stage()
984
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600986 for tid in self.runtaskentries:
987 for dep in self.runtaskentries[tid].depends:
988 self.runtaskentries[dep].revdeps.add(tid)
989
990 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500991
992 # Identify tasks at the end of dependency chains
993 # Error on circular dependency loops (length two)
994 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600995 for tid in self.runtaskentries:
996 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500997 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600998 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500999 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001000 if dep in self.runtaskentries[tid].depends:
1001 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
1002
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001003
1004 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
1005
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001006 self.init_progress_reporter.next_stage()
1007
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001008 # Calculate task weights
1009 # Check of higher length circular dependencies
1010 self.runq_weight = self.calculate_task_weights(endpoints)
1011
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001012 self.init_progress_reporter.next_stage()
1013
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001014 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001015 for mc in self.dataCaches:
1016 prov_list = {}
1017 seen_fn = []
1018 for tid in self.runtaskentries:
1019 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1020 if taskfn in seen_fn:
1021 continue
1022 if mc != tidmc:
1023 continue
1024 seen_fn.append(taskfn)
1025 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1026 if prov not in prov_list:
1027 prov_list[prov] = [taskfn]
1028 elif taskfn not in prov_list[prov]:
1029 prov_list[prov].append(taskfn)
1030 for prov in prov_list:
1031 if len(prov_list[prov]) < 2:
1032 continue
1033 if prov in self.multi_provider_whitelist:
1034 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001035 seen_pn = []
1036 # If two versions of the same PN are being built its fatal, we don't support it.
1037 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001038 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001039 if pn not in seen_pn:
1040 seen_pn.append(pn)
1041 else:
1042 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001043 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1044 #
1045 # Construct a list of things which uniquely depend on each provider
1046 # since this may help the user figure out which dependency is triggering this warning
1047 #
1048 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1049 deplist = {}
1050 commondeps = None
1051 for provfn in prov_list[prov]:
1052 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001053 for tid in self.runtaskentries:
1054 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001055 if fn != provfn:
1056 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001057 for dep in self.runtaskentries[tid].revdeps:
1058 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001059 if fn == provfn:
1060 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001062 if not commondeps:
1063 commondeps = set(deps)
1064 else:
1065 commondeps &= deps
1066 deplist[provfn] = deps
1067 for provfn in deplist:
1068 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1069 #
1070 # Construct a list of provides and runtime providers for each recipe
1071 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1072 #
1073 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1074 provide_results = {}
1075 rprovide_results = {}
1076 commonprovs = None
1077 commonrprovs = None
1078 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001079 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001080 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001081 for rprovide in self.dataCaches[mc].rproviders:
1082 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001083 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001084 for package in self.dataCaches[mc].packages:
1085 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001086 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001087 for package in self.dataCaches[mc].packages_dynamic:
1088 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001089 rprovides.add(package)
1090 if not commonprovs:
1091 commonprovs = set(provides)
1092 else:
1093 commonprovs &= provides
1094 provide_results[provfn] = provides
1095 if not commonrprovs:
1096 commonrprovs = set(rprovides)
1097 else:
1098 commonrprovs &= rprovides
1099 rprovide_results[provfn] = rprovides
1100 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1101 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1102 for provfn in prov_list[prov]:
1103 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1104 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1105
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001106 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001107 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001108 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 logger.error(msg)
1110
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001111 self.init_progress_reporter.next_stage()
1112
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001113 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001114 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001115 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001116 self.stampfnwhitelist[mc] = []
1117 for entry in self.stampwhitelist.split():
1118 if entry not in self.taskData[mc].build_targets:
1119 continue
1120 fn = self.taskData.build_targets[entry][0]
1121 self.stampfnwhitelist[mc].append(fn)
1122
1123 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001124
1125 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001126 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001127 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001128 for tid in self.runtaskentries:
1129 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001130 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001131 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001133 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001134
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001136
1137 # Invalidate task if force mode active
1138 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001139 for tid in self.target_tids:
1140 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001141
1142 # Invalidate task if invalidate mode active
1143 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001144 for tid in self.target_tids:
1145 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001146 for st in self.cooker.configuration.invalidate_stamp.split(','):
1147 if not st.startswith("do_"):
1148 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001149 invalidate_task(fn + ":" + st, True)
1150
1151 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001152
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001153 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001154 for mc in taskData:
1155 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1156 virtpnmap = {}
1157 for v in virtmap:
1158 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1159 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1160 if hasattr(bb.parse.siggen, "tasks_resolved"):
1161 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1162
1163 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001164
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001165 # Iterate over the task list and call into the siggen code
1166 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001167 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001168 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001169 for tid in todeal.copy():
1170 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1171 dealtwith.add(tid)
1172 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001173 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001174
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001175 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001176
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001177 #self.dump_data()
1178 return len(self.runtaskentries)
1179
Brad Bishop19323692019-04-05 15:28:33 -04001180 def prepare_task_hash(self, tid):
1181 procdep = []
1182 for dep in self.runtaskentries[tid].depends:
Brad Bishop08902b02019-08-20 09:16:51 -04001183 procdep.append(dep)
1184 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.dataCaches[mc_from_tid(tid)])
1185 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001186
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001187 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001188 """
1189 Dump some debug information on the internal data structures
1190 """
1191 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001192 for tid in self.runtaskentries:
1193 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1194 self.runtaskentries[tid].weight,
1195 self.runtaskentries[tid].depends,
1196 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001197
class RunQueueWorker():
    """Pairs a bitbake-worker subprocess with the pipe used to read its status."""
    def __init__(self, process, pipe):
        self.process, self.pipe = process, pipe
1203class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
        """
        Build the runqueue state for the given targets.

        cooker/cfgData provide global configuration; dataCaches and taskData
        are per-multiconfig dicts used to construct the RunQueueData task
        graph for the requested targets.
        """

        self.cooker = cooker
        self.cfgData = cfgData
        # The full task graph / dependency data for this run.
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        # Policy knobs read from configuration (with defaults where sensible).
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        # State machine start point (module-level runQueuePrepare constant).
        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneInit, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False
        # Worker process bookkeeping: populated later, keyed by multiconfig.
        self.rqexe = None
        self.worker = {}
        self.fakeworker = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001229 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001230 logger.debug(1, "Starting bitbake-worker")
1231 magic = "decafbad"
1232 if self.cooker.configuration.profile:
1233 magic = "decafbadbad"
1234 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001235 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001236 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001237 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001238 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001239 env = os.environ.copy()
1240 for key, value in (var.split('=') for var in fakerootenv):
1241 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001242 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001243 else:
1244 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1245 bb.utils.nonblockingfd(worker.stdout)
1246 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1247
1248 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001249 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1250 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1251 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1252 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001253 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001254 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1255 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1256 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1257 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1258 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001259 "buildname" : self.cfgData.getVar("BUILDNAME"),
1260 "date" : self.cfgData.getVar("DATE"),
1261 "time" : self.cfgData.getVar("TIME"),
Brad Bishop08902b02019-08-20 09:16:51 -04001262 "hashservport" : self.cooker.hashservport,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001263 }
1264
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001265 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001266 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001267 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001268 worker.stdin.flush()
1269
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001270 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001271
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001272 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001273 if not worker:
1274 return
1275 logger.debug(1, "Teardown for bitbake-worker")
1276 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001277 worker.process.stdin.write(b"<quit></quit>")
1278 worker.process.stdin.flush()
1279 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001280 except IOError:
1281 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001282 while worker.process.returncode is None:
1283 worker.pipe.read()
1284 worker.process.poll()
1285 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001286 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001287 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001288
1289 def start_worker(self):
1290 if self.worker:
1291 self.teardown_workers()
1292 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001293 for mc in self.rqdata.dataCaches:
1294 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001295
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001296 def start_fakeworker(self, rqexec, mc):
1297 if not mc in self.fakeworker:
1298 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001299
1300 def teardown_workers(self):
1301 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001302 for mc in self.worker:
1303 self._teardown_worker(self.worker[mc])
1304 self.worker = {}
1305 for mc in self.fakeworker:
1306 self._teardown_worker(self.fakeworker[mc])
1307 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001308
1309 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001310 for mc in self.worker:
1311 self.worker[mc].pipe.read()
1312 for mc in self.fakeworker:
1313 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001314
1315 def active_fds(self):
1316 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001317 for mc in self.worker:
1318 fds.append(self.worker[mc].pipe.input)
1319 for mc in self.fakeworker:
1320 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001321 return fds
1322
    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """
        Return True if the stamp for task *tid* is current, i.e. the task
        does not need to rerun.

        taskname: overrides the task name parsed out of tid.
        recurse: also validate the stamps of all dependencies, recursively.
        cache: dict memoising per-tid results across recursive calls.
        """
        def get_timestamp(f):
            # mtime of f, or None if it is missing or cannot be stat'd
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        # stamppolicy controls how dependency stamps from other recipes are
        # treated: "perfile" ignores them, "whitelist" exempts listed files
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
        stampwhitelist = []
        if self.stamppolicy == "whitelist":
            stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # A *_setscene variant with an existing stamp is considered current
        # without any dependency timestamp comparison
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # If the dependency's setscene stamp is newer than (or replaces)
                # its real stamp, skip the timestamp comparison for it
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        # Our stamp predates a dependency's stamp: out of date
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                if recurse and iscurrent:
                    if dep in cache:
                        iscurrent = cache[dep]
                        if not iscurrent:
                            logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                    else:
                        iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                        cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent
1395
Brad Bishop08902b02019-08-20 09:16:51 -04001396 def validate_hashes(self, tocheck, data, currentcount=None, siginfo=False):
Brad Bishop96ff1982019-08-19 13:50:42 -04001397 valid = set()
1398 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001399 sq_data = {}
1400 sq_data['hash'] = {}
1401 sq_data['hashfn'] = {}
1402 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001403 for tid in tocheck:
1404 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001405 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1406 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1407 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001408
Brad Bishop08902b02019-08-20 09:16:51 -04001409 valid = self.validate_hash(sq_data, data, siginfo, currentcount)
Brad Bishop96ff1982019-08-19 13:50:42 -04001410
1411 return valid
1412
Brad Bishop08902b02019-08-20 09:16:51 -04001413 def validate_hash(self, sq_data, d, siginfo, currentcount):
1414 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
Brad Bishop19323692019-04-05 15:28:33 -04001415
Brad Bishop08902b02019-08-20 09:16:51 -04001416 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
1417 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
Brad Bishop19323692019-04-05 15:28:33 -04001418
Brad Bishop19323692019-04-05 15:28:33 -04001419 return bb.utils.better_eval(call, locs)
1420
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is a state machine driven repeatedly by execute_runqueue():
        runQueuePrepare -> runQueueSceneInit -> runQueueRunning ->
        runQueueCleanUp -> runQueueComplete/runQueueFailed.  Returns False
        once everything is done, otherwise a value telling the caller to
        keep looping.
        """

        retval = True

        if self.state is runQueuePrepare:
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            # prepare() returns 0 when there is nothing to run at all
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()

            # we are ready to run, emit dependency info to any UI or class which
            # needs it
            depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
            self.rqdata.init_progress_reporter.next_stage()
            bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

            # Hook the disk monitor check onto the heartbeat event (once only)
            if not self.dm_event_handler_registered:
                res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                # Dumping signatures replaces execution entirely
                self.state = runQueueComplete

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()
            self.start_worker()
            self.rqdata.init_progress_reporter.next_stage()
            self.rqexe = RunQueueExecute(self)

            # If we don't have any setscene functions, skip execution
            if len(self.rqdata.runq_setscene_tids) == 0:
                logger.info('No setscene tasks')
                for tid in self.rqdata.runtaskentries:
                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
                        self.rqexe.setbuildable(tid)
                    self.rqexe.tasks_notcovered.add(tid)
                self.rqexe.sqdone = True
            logger.info('Executing Tasks')
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        # Deregister the heartbeat handler as soon as the build finishes
        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            # Persist unihash state and stop the workers now the build is over
            bb.parse.siggen.save_unitaskhashes()
            self.teardown_workers()
            if self.rqexe:
                if self.rqexe.stats.failed:
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
                else:
                    # Let's avoid the word "failed" if nothing actually did
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1518
1519 def execute_runqueue(self):
1520 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1521 try:
1522 return self._execute_runqueue()
1523 except bb.runqueue.TaskFailure:
1524 raise
1525 except SystemExit:
1526 raise
1527 except bb.BBHandledException:
1528 try:
1529 self.teardown_workers()
1530 except:
1531 pass
1532 self.state = runQueueComplete
1533 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001534 except Exception as err:
1535 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001536 try:
1537 self.teardown_workers()
1538 except:
1539 pass
1540 self.state = runQueueComplete
1541 raise
1542
1543 def finish_runqueue(self, now = False):
1544 if not self.rqexe:
1545 self.state = runQueueComplete
1546 return
1547
1548 if now:
1549 self.rqexe.finish_now()
1550 else:
1551 self.rqexe.finish()
1552
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001553 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001554 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001555 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1556 siggen = bb.parse.siggen
1557 dataCaches = self.rqdata.dataCaches
1558 siggen.dump_sigfn(fn, dataCaches, options)
1559
1560 def dump_signatures(self, options):
1561 fns = set()
1562 bb.note("Reparsing files to collect dependency data")
1563
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001564 for tid in self.rqdata.runtaskentries:
1565 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001566 fns.add(fn)
1567
1568 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1569 # We cannot use the real multiprocessing.Pool easily due to some local data
1570 # that can't be pickled. This is a cheap multi-process solution.
1571 launched = []
1572 while fns:
1573 if len(launched) < max_process:
1574 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1575 p.start()
1576 launched.append(p)
1577 for q in launched:
1578 # The finished processes are joined when calling is_alive()
1579 if not q.is_alive():
1580 launched.remove(q)
1581 for p in launched:
1582 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001583
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001584 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001585
1586 return
1587
    def print_diffscenetasks(self):
        """
        Determine which tasks have no valid setscene (cached) artefact and
        print the "root" invalid tasks - those all of whose dependencies are
        valid. Returns that set of root invalid tasks.
        """
        noexec = []
        tocheck = set()

        # Partition tasks: noexec tasks can never come from the cache, the
        # rest get their hashes validated
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            tocheck.add(tid)

        valid_new = self.validate_hashes(tocheck, self.cooker.data, None, True)

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                # dep only counts as valid if every setscene reverse
                # dependency of it is one of these noexec setscene tasks
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Anything neither valid nor noexec has to be rebuilt
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk each invalid task's dependency tree; if any
        # dependency is itself invalid, this task is not a root cause
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Once tid is known non-root, stop walking its tree early
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1651
    def write_diffscenetasks(self, invalidtasks):
        """
        For each invalid task, locate the closest previously-written sigdata
        file and print a human-readable explanation of why the cached
        artefact could not be used.
        """
        # Define recursion callback
        def recursecb(key, hash1, hash2):
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                # Both sigdata files found - recurse into their differences,
                # indenting nested output
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            # Keep the last sigdata file whose name contains our current hash
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Remaining candidates are other hashes; pick the one with the
            # largest recorded value (presumably the most recent timestamp -
            # TODO confirm against find_siginfo)
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1686
Brad Bishop96ff1982019-08-19 13:50:42 -04001687
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001688class RunQueueExecute:
1689
    def __init__(self, rq):
        """
        Executor for a prepared RunQueue: drives both the setscene (sq_*)
        queue and the real runqueue state.
        """
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene queue state: buildable / started / currently executing tids
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        # Tids whose taskhash changed and await reprocessing, plus tasks
        # pending migration as a result
        self.updated_taskhash_queue = []
        self.pending_migrations = set()

        # Real runqueue state
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamps of currently building tasks: per-task dict plus a flat list
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []
        # Setscene tasks deferred until another tid (the value) completes
        self.sq_deferred = {}

        self.stampcache = {}

        # Tasks held back pending scenequeue coverage decisions
        self.holdoff_tasks = set()
        self.holdoff_need_update = True
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Route worker pipe events back into this executor
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # We can't skip specified target tasks which aren't setscene tasks
        self.cantskip = set(self.rqdata.target_tids)
        self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
        self.cantskip.intersection_update(self.rqdata.runtaskentries)

        # Pick the scheduler named by BB_SCHEDULER from the available classes
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        #if len(self.rqdata.runq_setscene_tids) > 0:
        self.sqdata = SQData()
        build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001759
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001760 def runqueue_process_waitpid(self, task, status):
1761
1762 # self.build_stamps[pid] may not exist when use shared work directory.
1763 if task in self.build_stamps:
1764 self.build_stamps2.remove(self.build_stamps[task])
1765 del self.build_stamps[task]
1766
Brad Bishop96ff1982019-08-19 13:50:42 -04001767 if task in self.sq_live:
1768 if status != 0:
1769 self.sq_task_fail(task, status)
1770 else:
1771 self.sq_task_complete(task)
1772 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001773 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001774 if status != 0:
1775 self.task_fail(task, status)
1776 else:
1777 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001778 return True
1779
1780 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001781 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001782 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001783 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1784 self.rq.worker[mc].process.stdin.flush()
1785 except IOError:
1786 # worker must have died?
1787 pass
1788 for mc in self.rq.fakeworker:
1789 try:
1790 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1791 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001792 except IOError:
1793 # worker must have died?
1794 pass
1795
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001796 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001797 self.rq.state = runQueueFailed
1798 return
1799
1800 self.rq.state = runQueueComplete
1801 return
1802
1803 def finish(self):
1804 self.rq.state = runQueueCleanUp
1805
Brad Bishop96ff1982019-08-19 13:50:42 -04001806 active = self.stats.active + self.sq_stats.active
1807 if active > 0:
1808 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001809 self.rq.read_workers()
1810 return self.rq.active_fds()
1811
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001812 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001813 self.rq.state = runQueueFailed
1814 return True
1815
1816 self.rq.state = runQueueComplete
1817 return True
1818
Brad Bishop96ff1982019-08-19 13:50:42 -04001819 # Used by setscene only
1820 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001821 if not self.rq.depvalidate:
1822 return False
1823
Brad Bishop08902b02019-08-20 09:16:51 -04001824 # Must not edit parent data
1825 taskdeps = set(taskdeps)
1826
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001827 taskdata = {}
1828 taskdeps.add(task)
1829 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001830 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1831 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001832 taskdata[dep] = [pn, taskname, fn]
1833 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001834 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001835 valid = bb.utils.better_eval(call, locs)
1836 return valid
1837
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001838 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001839 active = self.stats.active + self.sq_stats.active
1840 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001841 return can_start
1842
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001843 def get_schedulers(self):
1844 schedulers = set(obj for obj in globals().values()
1845 if type(obj) is type and
1846 issubclass(obj, RunQueueScheduler))
1847
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001848 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001849 if user_schedulers:
1850 for sched in user_schedulers.split():
1851 if not "." in sched:
1852 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1853 continue
1854
1855 modname, name = sched.rsplit(".", 1)
1856 try:
1857 module = __import__(modname, fromlist=(name,))
1858 except ImportError as exc:
1859 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1860 raise SystemExit(1)
1861 else:
1862 schedulers.add(getattr(module, name))
1863 return schedulers
1864
    def setbuildable(self, task):
        """Mark *task* as ready to run and hand it to the scheduler."""
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001868
1869 def task_completeoutright(self, task):
1870 """
1871 Mark a task as completed
1872 Look at the reverse dependencies and mark any task with
1873 completed dependencies as buildable
1874 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001875 self.runq_complete.add(task)
1876 for revdep in self.rqdata.runtaskentries[task].revdeps:
1877 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001878 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001879 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001880 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001881 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001882 for dep in self.rqdata.runtaskentries[revdep].depends:
1883 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001884 alldeps = False
1885 break
1886 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001887 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001888 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001889
    def task_complete(self, task):
        """Record successful completion of *task*, fire the event and propagate to revdeps."""
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1894
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # '' is the default multiconfig; abort means stop scheduling new work
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1905
    def task_skip(self, task, reason):
        """Skip *task* for *reason*: treated as run and complete without executing."""
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        # A skipped task counts as both skipped and completed in the stats
        self.stats.taskSkipped()
        self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001913
    def summarise_scenequeue_errors(self):
        """
        Sanity-check the final scenequeue state and log anything left in an
        inconsistent state. Returns True if any error was found.
        """
        err = False
        if not self.sqdone:
            logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
            completeevent = sceneQueueComplete(self.sq_stats, self.rq)
            bb.event.fire(completeevent, self.cfgData)
        # Any leftover queue contents mean processing never finished cleanly
        if self.sq_deferred:
            logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
            err = True
        if self.updated_taskhash_queue:
            logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
            err = True
        if self.holdoff_tasks:
            logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
            err = True

        # Every setscene task should have passed through the full lifecycle
        for tid in self.rqdata.runq_setscene_tids:
            if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
                err = True
                logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
            if tid not in self.sq_buildable:
                err = True
                logger.error("Setscene Task %s was never marked as buildable" % tid)
            if tid not in self.sq_running:
                err = True
                logger.error("Setscene Task %s was never marked as running" % tid)

        # Every real task should have been classified by the setscene code
        for x in self.rqdata.runtaskentries:
            if x not in self.tasks_covered and x not in self.tasks_notcovered:
                logger.error("Task %s was never moved from the setscene queue" % x)
                err = True
            if x not in self.tasks_scenequeue_done:
                logger.error("Task %s was never processed by the setscene code" % x)
                err = True
            if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
                logger.error("Task %s was never marked as buildable by the setscene code" % x)
                err = True
        return err
1952
1953
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001954 def execute(self):
1955 """
Brad Bishop96ff1982019-08-19 13:50:42 -04001956 Run the tasks in a queue prepared by prepare_runqueue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001957 """
1958
1959 self.rq.read_workers()
Brad Bishop08902b02019-08-20 09:16:51 -04001960 self.process_possible_migrations()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001961
Brad Bishop96ff1982019-08-19 13:50:42 -04001962 task = None
1963 if not self.sqdone and self.can_start_task():
1964 # Find the next setscene to run
Brad Bishop08902b02019-08-20 09:16:51 -04001965 for nexttask in sorted(self.rqdata.runq_setscene_tids):
Brad Bishop96ff1982019-08-19 13:50:42 -04001966 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
1967 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
1968 if nexttask not in self.rqdata.target_tids:
1969 logger.debug(2, "Skipping setscene for task %s" % nexttask)
1970 self.sq_task_skip(nexttask)
1971 self.scenequeue_notneeded.add(nexttask)
1972 if nexttask in self.sq_deferred:
1973 del self.sq_deferred[nexttask]
1974 return True
Brad Bishop08902b02019-08-20 09:16:51 -04001975 # If covered tasks are running, need to wait for them to complete
1976 for t in self.sqdata.sq_covered_tasks[nexttask]:
1977 if t in self.runq_running and t not in self.runq_complete:
1978 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04001979 if nexttask in self.sq_deferred:
1980 if self.sq_deferred[nexttask] not in self.runq_complete:
1981 continue
1982 logger.debug(1, "Task %s no longer deferred" % nexttask)
1983 del self.sq_deferred[nexttask]
1984 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, None, False)
1985 if not valid:
1986 logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
1987 self.sq_task_failoutright(nexttask)
1988 return True
1989 else:
1990 self.sqdata.outrightfail.remove(nexttask)
1991 if nexttask in self.sqdata.outrightfail:
1992 logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
1993 self.sq_task_failoutright(nexttask)
1994 return True
1995 if nexttask in self.sqdata.unskippable:
1996 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
1997 task = nexttask
1998 break
1999 if task is not None:
2000 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
2001 taskname = taskname + "_setscene"
2002 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
2003 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
2004 self.sq_task_failoutright(task)
2005 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002006
Brad Bishop96ff1982019-08-19 13:50:42 -04002007 if self.cooker.configuration.force:
2008 if task in self.rqdata.target_tids:
2009 self.sq_task_failoutright(task)
2010 return True
2011
2012 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2013 logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
2014 self.sq_task_skip(task)
2015 return True
2016
2017 if self.cooker.configuration.skipsetscene:
2018 logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
2019 self.sq_task_failoutright(task)
2020 return True
2021
2022 startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
2023 bb.event.fire(startevent, self.cfgData)
2024
2025 taskdepdata = self.sq_build_taskdepdata(task)
2026
2027 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2028 taskhash = self.rqdata.get_task_hash(task)
2029 unihash = self.rqdata.get_task_unihash(task)
2030 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
2031 if not mc in self.rq.fakeworker:
2032 self.rq.start_fakeworker(self, mc)
2033 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2034 self.rq.fakeworker[mc].process.stdin.flush()
2035 else:
2036 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2037 self.rq.worker[mc].process.stdin.flush()
2038
2039 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2040 self.build_stamps2.append(self.build_stamps[task])
2041 self.sq_running.add(task)
2042 self.sq_live.add(task)
2043 self.sq_stats.taskActive()
2044 if self.can_start_task():
2045 return True
2046
Brad Bishopc68388fc2019-08-26 01:33:31 -04002047 self.update_holdofftasks()
2048
Brad Bishop08902b02019-08-20 09:16:51 -04002049 if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
Brad Bishop96ff1982019-08-19 13:50:42 -04002050 logger.info("Setscene tasks completed")
Brad Bishop96ff1982019-08-19 13:50:42 -04002051
Brad Bishop08902b02019-08-20 09:16:51 -04002052 err = self.summarise_scenequeue_errors()
Brad Bishop96ff1982019-08-19 13:50:42 -04002053 if err:
2054 self.rq.state = runQueueFailed
2055 return True
2056
2057 if self.cooker.configuration.setsceneonly:
2058 self.rq.state = runQueueComplete
2059 return True
2060 self.sqdone = True
2061
2062 if self.stats.total == 0:
2063 # nothing to do
2064 self.rq.state = runQueueComplete
2065 return True
2066
2067 if self.cooker.configuration.setsceneonly:
2068 task = None
2069 else:
2070 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002071 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002072 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002073
Brad Bishop96ff1982019-08-19 13:50:42 -04002074 if self.rqdata.setscenewhitelist is not None:
2075 if self.check_setscenewhitelist(task):
2076 self.task_fail(task, "setscene whitelist")
2077 return True
2078
2079 if task in self.tasks_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002080 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002081 self.task_skip(task, "covered")
2082 return True
2083
2084 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002085 logger.debug(2, "Stamp current task %s", task)
2086
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002087 self.task_skip(task, "existing")
2088 return True
2089
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002090 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002091 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2092 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2093 noexec=True)
2094 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002095 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002096 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002097 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002098 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002099 self.task_complete(task)
2100 return True
2101 else:
2102 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2103 bb.event.fire(startevent, self.cfgData)
2104
2105 taskdepdata = self.build_taskdepdata(task)
2106
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002107 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002108 taskhash = self.rqdata.get_task_hash(task)
2109 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002110 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002111 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002112 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002113 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002114 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002115 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002116 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002117 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002118 return True
Brad Bishop19323692019-04-05 15:28:33 -04002119 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002120 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002121 else:
Brad Bishop19323692019-04-05 15:28:33 -04002122 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002123 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002124
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002125 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2126 self.build_stamps2.append(self.build_stamps[task])
2127 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002128 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002129 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002130 return True
2131
Brad Bishop96ff1982019-08-19 13:50:42 -04002132 if self.stats.active > 0 or self.sq_stats.active > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002133 self.rq.read_workers()
2134 return self.rq.active_fds()
2135
Brad Bishop96ff1982019-08-19 13:50:42 -04002136 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2137 if self.sq_deferred:
2138 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2139 logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
2140 self.sq_task_failoutright(tid)
2141 return True
2142
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002143 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002144 self.rq.state = runQueueFailed
2145 return True
2146
2147 # Sanity Checks
Brad Bishop08902b02019-08-20 09:16:51 -04002148 err = self.summarise_scenequeue_errors()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002149 for task in self.rqdata.runtaskentries:
2150 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002151 logger.error("Task %s never buildable!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002152 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002153 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002154 logger.error("Task %s never ran!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002155 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002156 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002157 logger.error("Task %s never completed!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002158 err = True
2159
2160 if err:
2161 self.rq.state = runQueueFailed
2162 else:
2163 self.rq.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002164
2165 return True
2166
Brad Bishopc68388fc2019-08-26 01:33:31 -04002167 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002168 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002169 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002170 thismc = mc_from_tid(dep)
2171 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002172 continue
2173 ret.add(dep)
2174 return ret
2175
2176 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
2177 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002178 def build_taskdepdata(self, task):
2179 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002180 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002181 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002182 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002183 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002184 while next:
2185 additional = []
2186 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002187 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2188 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2189 deps = self.rqdata.runtaskentries[revdep].depends
2190 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002191 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002192 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002193 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002194 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002195 for revdep2 in deps:
2196 if revdep2 not in taskdepdata:
2197 additional.append(revdep2)
2198 next = additional
2199
2200 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2201 return taskdepdata
2202
Brad Bishop08902b02019-08-20 09:16:51 -04002203 def update_holdofftasks(self):
Brad Bishopc68388fc2019-08-26 01:33:31 -04002204
2205 if not self.holdoff_need_update:
2206 return
2207
2208 notcovered = set(self.scenequeue_notcovered)
2209 notcovered |= self.cantskip
2210 for tid in self.scenequeue_notcovered:
2211 notcovered |= self.sqdata.sq_covered_tasks[tid]
2212 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2213 notcovered.intersection_update(self.tasks_scenequeue_done)
2214
2215 covered = set(self.scenequeue_covered)
2216 for tid in self.scenequeue_covered:
2217 covered |= self.sqdata.sq_covered_tasks[tid]
2218 covered.difference_update(notcovered)
2219 covered.intersection_update(self.tasks_scenequeue_done)
2220
2221 for tid in notcovered | covered:
2222 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2223 self.setbuildable(tid)
2224 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2225 self.setbuildable(tid)
2226
2227 self.tasks_covered = covered
2228 self.tasks_notcovered = notcovered
2229
Brad Bishop08902b02019-08-20 09:16:51 -04002230 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002231
Brad Bishop08902b02019-08-20 09:16:51 -04002232 for tid in self.rqdata.runq_setscene_tids:
2233 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2234 self.holdoff_tasks.add(tid)
2235
2236 for tid in self.holdoff_tasks.copy():
2237 for dep in self.sqdata.sq_covered_tasks[tid]:
2238 if dep not in self.runq_complete:
2239 self.holdoff_tasks.add(dep)
2240
Brad Bishopc68388fc2019-08-26 01:33:31 -04002241 self.holdoff_need_update = False
2242
Brad Bishop08902b02019-08-20 09:16:51 -04002243 def process_possible_migrations(self):
2244
2245 changed = set()
2246 for tid, unihash in self.updated_taskhash_queue.copy():
2247 if tid in self.runq_running and tid not in self.runq_complete:
2248 continue
2249
2250 self.updated_taskhash_queue.remove((tid, unihash))
2251
2252 if unihash != self.rqdata.runtaskentries[tid].unihash:
2253 logger.info("Task %s unihash changed to %s" % (tid, unihash))
2254 self.rqdata.runtaskentries[tid].unihash = unihash
2255 bb.parse.siggen.set_unihash(tid, unihash)
2256
2257 # Work out all tasks which depend on this one
2258 total = set()
2259 next = set(self.rqdata.runtaskentries[tid].revdeps)
2260 while next:
2261 current = next.copy()
2262 total = total |next
2263 next = set()
2264 for ntid in current:
2265 next |= self.rqdata.runtaskentries[ntid].revdeps
2266 next.difference_update(total)
2267
2268 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2269 done = set()
2270 next = set(self.rqdata.runtaskentries[tid].revdeps)
2271 while next:
2272 current = next.copy()
2273 next = set()
2274 for tid in current:
2275 if not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2276 continue
2277 procdep = []
2278 for dep in self.rqdata.runtaskentries[tid].depends:
2279 procdep.append(dep)
2280 orighash = self.rqdata.runtaskentries[tid].hash
2281 self.rqdata.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.rqdata.dataCaches[mc_from_tid(tid)])
2282 origuni = self.rqdata.runtaskentries[tid].unihash
2283 self.rqdata.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
2284 logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, self.rqdata.runtaskentries[tid].hash, origuni, self.rqdata.runtaskentries[tid].unihash))
2285 next |= self.rqdata.runtaskentries[tid].revdeps
2286 changed.add(tid)
2287 total.remove(tid)
2288 next.intersection_update(total)
2289
2290 if changed:
2291 for mc in self.rq.worker:
2292 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2293 for mc in self.rq.fakeworker:
2294 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2295
2296 logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))
2297
2298 for tid in changed:
2299 if tid not in self.rqdata.runq_setscene_tids:
2300 continue
2301 valid = self.rq.validate_hashes(set([tid]), self.cooker.data, None, False)
2302 if not valid:
2303 continue
2304 if tid in self.runq_running:
2305 continue
2306 if tid not in self.pending_migrations:
2307 self.pending_migrations.add(tid)
2308
2309 for tid in self.pending_migrations.copy():
2310 valid = True
2311 # Check no tasks this covers are running
2312 for dep in self.sqdata.sq_covered_tasks[tid]:
2313 if dep in self.runq_running and dep not in self.runq_complete:
2314 logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
2315 valid = False
2316 break
2317 if not valid:
2318 continue
2319
2320 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002321 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002322
2323 if tid in self.tasks_scenequeue_done:
2324 self.tasks_scenequeue_done.remove(tid)
2325 for dep in self.sqdata.sq_covered_tasks[tid]:
2326 if dep not in self.runq_complete:
2327 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2328 self.tasks_scenequeue_done.remove(dep)
2329
2330 if tid in self.sq_buildable:
2331 self.sq_buildable.remove(tid)
2332 if tid in self.sq_running:
2333 self.sq_running.remove(tid)
2334 if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2335 if tid not in self.sq_buildable:
2336 self.sq_buildable.add(tid)
2337 if len(self.sqdata.sq_revdeps[tid]) == 0:
2338 self.sq_buildable.add(tid)
2339
2340 if tid in self.sqdata.outrightfail:
2341 self.sqdata.outrightfail.remove(tid)
2342 if tid in self.scenequeue_notcovered:
2343 self.scenequeue_notcovered.remove(tid)
2344 if tid in self.scenequeue_covered:
2345 self.scenequeue_covered.remove(tid)
2346 if tid in self.scenequeue_notneeded:
2347 self.scenequeue_notneeded.remove(tid)
2348
2349 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2350 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2351
2352 if tid in self.stampcache:
2353 del self.stampcache[tid]
2354
2355 if tid in self.build_stamps:
2356 del self.build_stamps[tid]
2357
2358 logger.info("Setscene task %s now valid and being rerun" % tid)
2359 self.sqdone = False
2360
2361 if changed:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002362 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002363
Brad Bishop96ff1982019-08-19 13:50:42 -04002364 def scenequeue_updatecounters(self, task, fail=False):
Brad Bishop08902b02019-08-20 09:16:51 -04002365
2366 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002367 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002368 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002369 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002370 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002371 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2372 if dep not in self.sq_buildable:
2373 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002374
Brad Bishop96ff1982019-08-19 13:50:42 -04002375 next = set([task])
2376 while next:
2377 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002378 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002379 self.tasks_scenequeue_done.add(t)
2380 # Look down the dependency chain for non-setscene things which this task depends on
2381 # and mark as 'done'
2382 for dep in self.rqdata.runtaskentries[t].depends:
2383 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2384 continue
2385 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2386 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002387 next = new
2388
Brad Bishopc68388fc2019-08-26 01:33:31 -04002389 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002390
2391 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002392 """
2393 Mark a task as completed
2394 Look at the reverse dependencies and mark any task with
2395 completed dependencies as buildable
2396 """
2397
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002398 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002399 self.scenequeue_covered.add(task)
2400 self.scenequeue_updatecounters(task)
2401
Brad Bishop96ff1982019-08-19 13:50:42 -04002402 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002403 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002404 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002405 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2406 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002407 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2408 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2409 self.rq.state = runQueueCleanUp
2410
Brad Bishop96ff1982019-08-19 13:50:42 -04002411 def sq_task_complete(self, task):
2412 self.sq_stats.taskCompleted()
2413 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2414 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002415
Brad Bishop96ff1982019-08-19 13:50:42 -04002416 def sq_task_fail(self, task, result):
2417 self.sq_stats.taskFailed()
2418 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002419 self.scenequeue_notcovered.add(task)
2420 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002421 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002422
Brad Bishop96ff1982019-08-19 13:50:42 -04002423 def sq_task_failoutright(self, task):
2424 self.sq_running.add(task)
2425 self.sq_buildable.add(task)
2426 self.sq_stats.taskSkipped()
2427 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002428 self.scenequeue_notcovered.add(task)
2429 self.scenequeue_updatecounters(task, True)
2430
Brad Bishop96ff1982019-08-19 13:50:42 -04002431 def sq_task_skip(self, task):
2432 self.sq_running.add(task)
2433 self.sq_buildable.add(task)
2434 self.sq_task_completeoutright(task)
2435 self.sq_stats.taskSkipped()
2436 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002437
Brad Bishop96ff1982019-08-19 13:50:42 -04002438 def sq_build_taskdepdata(self, task):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002439 def getsetscenedeps(tid):
2440 deps = set()
2441 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2442 realtid = tid + "_setscene"
2443 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2444 for (depname, idependtask) in idepends:
2445 if depname not in self.rqdata.taskData[mc].build_targets:
2446 continue
2447
2448 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2449 if depfn is None:
2450 continue
2451 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2452 deps.add(deptid)
2453 return deps
2454
2455 taskdepdata = {}
2456 next = getsetscenedeps(task)
2457 next.add(task)
2458 while next:
2459 additional = []
2460 for revdep in next:
2461 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2462 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2463 deps = getsetscenedeps(revdep)
2464 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2465 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002466 unihash = self.rqdata.runtaskentries[revdep].unihash
2467 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002468 for revdep2 in deps:
2469 if revdep2 not in taskdepdata:
2470 additional.append(revdep2)
2471 next = additional
2472
2473 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2474 return taskdepdata
2475
Brad Bishop96ff1982019-08-19 13:50:42 -04002476 def check_setscenewhitelist(self, tid):
2477 # Check task that is going to run against the whitelist
2478 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2479 # Ignore covered tasks
2480 if tid in self.tasks_covered:
2481 return False
2482 # Ignore stamped tasks
2483 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2484 return False
2485 # Ignore noexec tasks
2486 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2487 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2488 return False
2489
2490 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2491 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2492 if tid in self.rqdata.runq_setscene_tids:
2493 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2494 else:
2495 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
2496 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2497 return True
2498 return False
2499
class SQData(object):
    """
    Holds the state computed for the scenequeue (setscene task graph).

    Populated by build_scenequeue_data() and consumed by the runqueue's
    setscene execution logic.
    """
    def __init__(self):
        # SceneQueue dependencies (tid -> set of dependent setscene tids)
        self.sq_deps = {}
        # SceneQueue reverse dependencies
        self.sq_revdeps = {}
        # Injected inter-setscene task dependencies (from _setscene idepends)
        self.sq_harddeps = {}
        # Cache of stamp files so duplicates can't run in parallel
        self.stamps = {}
        # Setscene tasks directly depended upon by the build
        self.unskippable = set()
        # List of setscene tasks which aren't present
        self.outrightfail = set()
        # A list of normal tasks a setscene task covers
        self.sq_covered_tasks = {}
2516
2517def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
2518
2519 sq_revdeps = {}
2520 sq_revdeps_squash = {}
2521 sq_collated_deps = {}
2522
2523 # We need to construct a dependency graph for the setscene functions. Intermediate
2524 # dependencies between the setscene tasks only complicate the code. This code
2525 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2526 # only containing the setscene functions.
2527
2528 rqdata.init_progress_reporter.next_stage()
2529
2530 # First process the chains up to the first setscene task.
2531 endpoints = {}
2532 for tid in rqdata.runtaskentries:
2533 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2534 sq_revdeps_squash[tid] = set()
2535 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2536 #bb.warn("Added endpoint %s" % (tid))
2537 endpoints[tid] = set()
2538
2539 rqdata.init_progress_reporter.next_stage()
2540
2541 # Secondly process the chains between setscene tasks.
2542 for tid in rqdata.runq_setscene_tids:
2543 sq_collated_deps[tid] = set()
2544 #bb.warn("Added endpoint 2 %s" % (tid))
2545 for dep in rqdata.runtaskentries[tid].depends:
2546 if tid in sq_revdeps[dep]:
2547 sq_revdeps[dep].remove(tid)
2548 if dep not in endpoints:
2549 endpoints[dep] = set()
2550 #bb.warn(" Added endpoint 3 %s" % (dep))
2551 endpoints[dep].add(tid)
2552
2553 rqdata.init_progress_reporter.next_stage()
2554
2555 def process_endpoints(endpoints):
2556 newendpoints = {}
2557 for point, task in endpoints.items():
2558 tasks = set()
2559 if task:
2560 tasks |= task
2561 if sq_revdeps_squash[point]:
2562 tasks |= sq_revdeps_squash[point]
2563 if point not in rqdata.runq_setscene_tids:
2564 for t in tasks:
2565 sq_collated_deps[t].add(point)
2566 sq_revdeps_squash[point] = set()
2567 if point in rqdata.runq_setscene_tids:
2568 sq_revdeps_squash[point] = tasks
2569 tasks = set()
2570 continue
2571 for dep in rqdata.runtaskentries[point].depends:
2572 if point in sq_revdeps[dep]:
2573 sq_revdeps[dep].remove(point)
2574 if tasks:
2575 sq_revdeps_squash[dep] |= tasks
2576 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2577 newendpoints[dep] = task
2578 if len(newendpoints) != 0:
2579 process_endpoints(newendpoints)
2580
2581 process_endpoints(endpoints)
2582
2583 rqdata.init_progress_reporter.next_stage()
2584
Brad Bishop08902b02019-08-20 09:16:51 -04002585 # Build a list of tasks which are "unskippable"
2586 # These are direct endpoints referenced by the build upto and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002587 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2588 new = True
2589 for tid in rqdata.runtaskentries:
2590 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2591 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002592 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002593 while new:
2594 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002595 orig = sqdata.unskippable.copy()
2596 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002597 if tid in rqdata.runq_setscene_tids:
2598 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002599 if len(rqdata.runtaskentries[tid].depends) == 0:
2600 # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002601 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002602 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002603 if sqdata.unskippable != orig:
2604 new = True
2605
2606 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002607
2608 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2609
2610 # Sanity check all dependencies could be changed to setscene task references
2611 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2612 if tid in rqdata.runq_setscene_tids:
2613 pass
2614 elif len(sq_revdeps_squash[tid]) != 0:
2615 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2616 else:
2617 del sq_revdeps_squash[tid]
2618 rqdata.init_progress_reporter.update(taskcounter)
2619
2620 rqdata.init_progress_reporter.next_stage()
2621
2622 # Resolve setscene inter-task dependencies
2623 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2624 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2625 for tid in rqdata.runq_setscene_tids:
2626 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2627 realtid = tid + "_setscene"
2628 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2629 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2630 for (depname, idependtask) in idepends:
2631
2632 if depname not in rqdata.taskData[mc].build_targets:
2633 continue
2634
2635 depfn = rqdata.taskData[mc].build_targets[depname][0]
2636 if depfn is None:
2637 continue
2638 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2639 if deptid not in rqdata.runtaskentries:
2640 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2641
2642 if not deptid in sqdata.sq_harddeps:
2643 sqdata.sq_harddeps[deptid] = set()
2644 sqdata.sq_harddeps[deptid].add(tid)
2645
2646 sq_revdeps_squash[tid].add(deptid)
2647 # Have to zero this to avoid circular dependencies
2648 sq_revdeps_squash[deptid] = set()
2649
2650 rqdata.init_progress_reporter.next_stage()
2651
2652 for task in sqdata.sq_harddeps:
2653 for dep in sqdata.sq_harddeps[task]:
2654 sq_revdeps_squash[dep].add(task)
2655
2656 rqdata.init_progress_reporter.next_stage()
2657
2658 #for tid in sq_revdeps_squash:
2659 # data = ""
2660 # for dep in sq_revdeps_squash[tid]:
2661 # data = data + "\n %s" % dep
2662 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2663
2664 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002665 sqdata.sq_covered_tasks = sq_collated_deps
2666
2667 # Build reverse version of revdeps to populate deps structure
2668 for tid in sqdata.sq_revdeps:
2669 sqdata.sq_deps[tid] = set()
2670 for tid in sqdata.sq_revdeps:
2671 for dep in sqdata.sq_revdeps[tid]:
2672 sqdata.sq_deps[dep].add(tid)
2673
2674 rqdata.init_progress_reporter.next_stage()
2675
2676 multiconfigs = set()
2677 for tid in sqdata.sq_revdeps:
2678 multiconfigs.add(mc_from_tid(tid))
2679 if len(sqdata.sq_revdeps[tid]) == 0:
2680 sqrq.sq_buildable.add(tid)
2681
2682 rqdata.init_progress_reporter.finish()
2683
2684 if rq.hashvalidate:
2685 noexec = []
2686 stamppresent = []
2687 tocheck = set()
2688
Brad Bishop08902b02019-08-20 09:16:51 -04002689 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002690 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2691
2692 taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
2693
2694 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2695 noexec.append(tid)
2696 sqrq.sq_task_skip(tid)
2697 bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
2698 continue
2699
2700 if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
2701 logger.debug(2, 'Setscene stamp current for task %s', tid)
2702 stamppresent.append(tid)
2703 sqrq.sq_task_skip(tid)
2704 continue
2705
2706 if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
2707 logger.debug(2, 'Normal stamp current for task %s', tid)
2708 stamppresent.append(tid)
2709 sqrq.sq_task_skip(tid)
2710 continue
2711
2712 tocheck.add(tid)
2713
2714 valid = rq.validate_hashes(tocheck, cooker.data, len(stamppresent), False)
2715
2716 valid_new = stamppresent
2717 for v in valid:
2718 valid_new.append(v)
2719
2720 hashes = {}
2721 for mc in sorted(multiconfigs):
Brad Bishop08902b02019-08-20 09:16:51 -04002722 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002723 if mc_from_tid(tid) != mc:
2724 continue
2725 if tid not in valid_new and tid not in noexec and tid not in sqrq.scenequeue_notcovered:
2726 sqdata.outrightfail.add(tid)
2727
2728 h = pending_hash_index(tid, rqdata)
2729 if h not in hashes:
2730 hashes[h] = tid
2731 else:
2732 sqrq.sq_deferred[tid] = hashes[h]
2733 bb.warn("Deferring %s after %s" % (tid, hashes[h]))
2734
2735
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails."""

    def __init__(self, x):
        # Store the supplied failure details directly as the exception
        # arguments (callers pass the details pre-packaged).
        self.args = x
2742
2743
class runQueueExitWait(bb.event.Event):
    """
    Event emitted while waiting for task processes to exit
    """

    def __init__(self, remain):
        # Count of active tasks still being waited on; also embedded in
        # the human-readable message.
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        super().__init__()
2753
class runQueueEvent(bb.event.Event):
    """
    Base class for events concerning a single runqueue task
    """
    def __init__(self, task, stats, rq):
        # Full task identifier; also split into filename/taskname parts
        # for the convenience of event consumers.
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Copy so the snapshot is unaffected by later queue activity
        self.stats = stats.copy()
        super().__init__()
2766
class sceneQueueEvent(runQueueEvent):
    """
    Base class for events concerning a single setscene task
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # Re-present the task under its "_setscene" variant name
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002777
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying that a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # noexec flag as passed in by the caller
        self.noexec = noexec
2785
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying that a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # noexec flag as passed in by the caller
        self.noexec = noexec
2793
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying that a task failed, carrying its exit code
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        # Human-readable summary used when the event is printed/logged
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2804
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002805class sceneQueueTaskFailed(sceneQueueEvent):
2806 """
2807 Event notifying a setscene task failed
2808 """
2809 def __init__(self, task, stats, exitcode, rq):
2810 sceneQueueEvent.__init__(self, task, stats, rq)
2811 self.exitcode = exitcode
2812
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002813 def __str__(self):
2814 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2815
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002816class sceneQueueComplete(sceneQueueEvent):
2817 """
2818 Event when all the sceneQueue tasks are complete
2819 """
2820 def __init__(self, stats, rq):
2821 self.stats = stats.copy()
2822 bb.event.Event.__init__(self)
2823
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed.

    Adds no fields beyond those provided by runQueueEvent.
    """
2828
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed.

    Adds no fields beyond those provided by sceneQueueEvent.
    """
2833
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying that a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        super().__init__(task, stats, rq)
        # Reason the task was skipped, as supplied by the caller
        self.reason = reason
2841
class taskUniHashUpdate(bb.event.Event):
    """
    Event notifying that a task's unique hash (unihash) was updated.

    Fired from a worker; runQueuePipe.read() forwards the (taskid,
    unihash) pair to the executor's updated_taskhash_queue.
    """
    def __init__(self, task, unihash):
        self.taskid = task
        self.unihash = unihash
        bb.event.Event.__init__(self)
2850
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server.

    Data arriving on the read end is framed as <event>...</event> and
    <exitcode>...</exitcode> messages containing pickled payloads; read()
    drains and dispatches them.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # Keep the read end; close our copy of the write end (it belongs
        # to the worker process) if one was passed in.
        self.input = pipein
        if pipeout:
            pipeout.close()
        # Non-blocking reads let read() drain whatever is available and
        # return instead of stalling the server.
        bb.utils.nonblockingfd(self.input)
        # Byte buffer of data received so far; may end mid-message.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Replace the executor that receives events/exit statuses read
        # from this pipe.
        self.rqexec = rqexec

    def read(self):
        """
        Drain pending data from the pipe, firing complete <event>
        payloads and dispatching complete <exitcode> payloads.

        Returns True if any new bytes were read.
        """
        # Check the worker processes are still alive first; a worker that
        # died outside of teardown means something went badly wrong, so
        # shut the runqueue down.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means the non-blocking fd had no data ready;
            # anything else is a real error.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            # Process complete <event>pickled-event</event> frames:
            # unpickle each payload and re-fire it on the server side.
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # 7 == len(b"<event>"), so this slice is the payload
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                if isinstance(event, taskUniHashUpdate):
                    # Hash equivalence updates also go to the executor
                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
                found = True
                # 8 == len(b"</event>"); drop the consumed frame
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            # Process complete <exitcode>pickled-(task, status)</exitcode>
            # frames reporting task completion to the executor.
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    # 10 == len(b"<exitcode>")
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # 11 == len(b"</exitcode>"); drop the consumed frame
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain anything still pending before closing the fd; warn if a
        # partial (unterminated) message remains in the buffer.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002916
def get_setscene_enforce_whitelist(d):
    """
    Return the BB_SETSCENE_ENFORCE_WHITELIST entries from the datastore
    *d*, or None when BB_SETSCENE_ENFORCE is not set to '1'.

    Entries of the form "%:<task>" are expanded against the non-option
    command line arguments (sys.argv), producing one "<target>:<task>"
    entry per target.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    entries = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    for entry in entries:
        if not entry.startswith('%:'):
            outlist.append(entry)
            continue
        # Substitute each non-option command line target for the '%'
        taskpart = entry.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                outlist.append(target.split(':')[0] + ':' + taskpart)
    return outlist
2930
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if "pn:taskname" matches any fnmatch-style pattern in
    *whitelist*, or unconditionally when *whitelist* is None (meaning no
    whitelist is in force).  An empty whitelist permits nothing.
    """
    import fnmatch
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)