blob: d9a67a31678c88f17c5b989d7d06fe916ec8048e [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001"""
2BitBake 'RunQueue' implementation
3
4Handles preparation and execution of a queue of tasks
5"""
6
7# Copyright (C) 2006-2007 Richard Purdie
8#
Brad Bishopc342db32019-05-15 21:57:59 -04009# SPDX-License-Identifier: GPL-2.0-only
Patrick Williamsc124f4f2015-09-15 14:41:29 -050010#
Patrick Williamsc124f4f2015-09-15 14:41:29 -050011
12import copy
13import os
14import sys
15import signal
16import stat
17import fcntl
18import errno
19import logging
20import re
21import bb
22from bb import msg, data, event
23from bb import monitordisk
24import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060025import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050026from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040027import shlex
Brad Bishop96ff1982019-08-19 13:50:42 -040028import pprint
Patrick Williamsc124f4f2015-09-15 14:41:29 -050029
# Module-level loggers: "BitBake" is the root build logger, the
# "BitBake.RunQueue" child is used for runqueue-specific messages.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 64-char hex string (a sha256 sum), case-insensitive;
# the lookaround assertions stop it matching inside a longer hex/alnum run.
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050034
Patrick Williamsc0f7c042017-02-23 20:41:17 -060035def fn_from_tid(tid):
36 return tid.rsplit(":", 1)[0]
37
def taskname_from_tid(tid):
    """Return the task name portion of a task id (everything after the last ':')."""
    _filename, _, taskname = tid.rpartition(":")
    return taskname
40
Andrew Geissler99467da2019-02-25 18:54:23 -060041def mc_from_tid(tid):
Brad Bishop15ae2502019-06-18 21:44:24 -040042 if tid.startswith('mc:'):
Andrew Geissler99467da2019-02-25 18:54:23 -060043 return tid.split(':')[1]
44 return ""
45
Patrick Williamsc0f7c042017-02-23 20:41:17 -060046def split_tid(tid):
47 (mc, fn, taskname, _) = split_tid_mcfn(tid)
48 return (mc, fn, taskname)
49
def split_tid_mcfn(tid):
    """
    Decompose a task id into (mc, fn, taskname, mcfn).

    Multiconfig tids look like "mc:<config>:<fn>:<taskname>"; plain tids are
    "<fn>:<taskname>". mcfn is the filename with any "mc:<config>:" prefix
    retained, fn is the bare filename.
    """
    if tid.startswith('mc:'):
        parts = tid.split(':')
        mc = parts[1]
        taskname = parts[-1]
        # fn may itself contain ':' (e.g. virtual providers), so rejoin the middle.
        fn = ":".join(parts[2:-1])
        mcfn = "mc:" + mc + ":" + fn
    else:
        fn, _, taskname = tid.rpartition(":")
        mc = ""
        mcfn = fn

    return (mc, fn, taskname, mcfn)
65
def build_tid(mc, fn, taskname):
    """Assemble a tid from its parts; prefix "mc:<mc>:" only for a non-default config."""
    base = fn + ":" + taskname
    if not mc:
        return base
    return "mc:" + mc + ":" + base
70
Brad Bishop96ff1982019-08-19 13:50:42 -040071# Index used to pair up potentially matching multiconfig tasks
72# We match on PN, taskname and hash being equal
73def pending_hash_index(tid, rqdata):
74 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
75 pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
76 h = rqdata.runtaskentries[tid].hash
77 return pn + ":" + "taskname" + h
78
Patrick Williamsc124f4f2015-09-15 14:41:29 -050079class RunQueueStats:
80 """
81 Holds statistics on the tasks handled by the associated runQueue
82 """
83 def __init__(self, total):
84 self.completed = 0
85 self.skipped = 0
86 self.failed = 0
87 self.active = 0
88 self.total = total
89
90 def copy(self):
91 obj = self.__class__(self.total)
92 obj.__dict__.update(self.__dict__)
93 return obj
94
95 def taskFailed(self):
96 self.active = self.active - 1
97 self.failed = self.failed + 1
98
Brad Bishop1a4b7ee2018-12-16 17:11:34 -080099 def taskCompleted(self):
100 self.active = self.active - 1
101 self.completed = self.completed + 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500102
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800103 def taskSkipped(self):
104 self.active = self.active + 1
105 self.skipped = self.skipped + 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500106
107 def taskActive(self):
108 self.active = self.active + 1
109
# These values indicate the next step due to be run in the
# runQueue state machine.  The numbering has gaps (4/5 unused here,
# presumably historic states) — do not renumber.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: materialise the keys view into a real list.  The original
        # wrapped the dict-keys view in a one-element list
        # ([self.rqdata.runtaskentries.keys()]), which breaks
        # prio_map.index(tid) in next_buildable_task for this scheduler.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        # Lazily-populated cache of each task name's "number_threads"
        # varflag (maximum parallel instances of that task).
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                # Fix: self.buildable is a set; the original called
                # .append(), which raises AttributeError.
                self.buildable.add(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Work on a copy: drop tasks already running or held off, and keep
        # only tasks whose setscene coverage has been decided.
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.runq_running)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            # Fast path: a single candidate, no need to build rev_prio_map.
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            # Lazily invert prio_map into a tid -> priority index lookup.
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the buildable task with the best (lowest) priority whose
        # stamp isn't already being written by another worker.
        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Mark a task as having become buildable."""
        self.buildable.add(task)

    def removebuildable(self, task):
        """Remove a task from the buildable set."""
        self.buildable.remove(task)

    def describe_task(self, taskid):
        """Return a human-readable description of a task for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Dump the current priority map to the debug log, most important first."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
225
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500226class RunQueueSchedulerSpeed(RunQueueScheduler):
227 """
228 A scheduler optimised for speed. The priority map is sorted by task weight,
229 heavier weighted tasks (tasks needed by the most other tasks) are run first.
230 """
231 name = "speed"
232
233 def __init__(self, runqueue, rqdata):
234 """
235 The priority map is sorted by task weight.
236 """
237 RunQueueScheduler.__init__(self, runqueue, rqdata)
238
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600239 weights = {}
240 for tid in self.rqdata.runtaskentries:
241 weight = self.rqdata.runtaskentries[tid].weight
242 if not weight in weights:
243 weights[weight] = []
244 weights[weight].append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500245
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600246 self.prio_map = []
247 for weight in sorted(weights):
248 for w in weights[weight]:
249 self.prio_map.append(w)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500250
251 self.prio_map.reverse()
252
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the after.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceeding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more imporant (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        # Stable re-insertion: for each task kind (most important first),
        # pull every tid of that kind forward to the front region of
        # prio_map, preserving the relative recipe order within the kind.
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500349
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600350class RunTaskEntry(object):
351 def __init__(self):
352 self.depends = set()
353 self.revdeps = set()
354 self.hash = None
Brad Bishop19323692019-04-05 15:28:33 -0400355 self.unihash = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600356 self.task = None
357 self.weight = 1
358
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500359class RunQueueData:
360 """
361 BitBake Run Queue implementation
362 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600363 def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500364 self.cooker = cooker
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600365 self.dataCaches = dataCaches
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500366 self.taskData = taskData
367 self.targets = targets
368 self.rq = rq
369 self.warn_multi_bb = False
370
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500371 self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
372 self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600373 self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
374 self.setscenewhitelist_checked = False
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500375 self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600376 self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500377
378 self.reset()
379
380 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600381 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500382
383 def runq_depends_names(self, ids):
384 import re
385 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600386 for id in ids:
387 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500388 nam = re.sub("_[^,]*,", ",", nam)
389 ret.extend([nam])
390 return ret
391
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600392 def get_task_hash(self, tid):
393 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500394
Brad Bishop19323692019-04-05 15:28:33 -0400395 def get_task_unihash(self, tid):
396 return self.runtaskentries[tid].unihash
397
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600398 def get_user_idstring(self, tid, task_name_suffix = ""):
399 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500400
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500401 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500402 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
403 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600404 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500405 return "%s:%s" % (pn, taskname)
406
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500407 def circular_depchains_handler(self, tasks):
408 """
409 Some tasks aren't buildable, likely due to circular dependency issues.
410 Identify the circular dependencies and print them in a user readable format.
411 """
412 from copy import deepcopy
413
414 valid_chains = []
415 explored_deps = {}
416 msgs = []
417
Andrew Geissler99467da2019-02-25 18:54:23 -0600418 class TooManyLoops(Exception):
419 pass
420
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500421 def chain_reorder(chain):
422 """
423 Reorder a dependency chain so the lowest task id is first
424 """
425 lowest = 0
426 new_chain = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600427 for entry in range(len(chain)):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500428 if chain[entry] < chain[lowest]:
429 lowest = entry
430 new_chain.extend(chain[lowest:])
431 new_chain.extend(chain[:lowest])
432 return new_chain
433
434 def chain_compare_equal(chain1, chain2):
435 """
436 Compare two dependency chains and see if they're the same
437 """
438 if len(chain1) != len(chain2):
439 return False
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600440 for index in range(len(chain1)):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500441 if chain1[index] != chain2[index]:
442 return False
443 return True
444
445 def chain_array_contains(chain, chain_array):
446 """
447 Return True if chain_array contains chain
448 """
449 for ch in chain_array:
450 if chain_compare_equal(ch, chain):
451 return True
452 return False
453
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600454 def find_chains(tid, prev_chain):
455 prev_chain.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500456 total_deps = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600457 total_deps.extend(self.runtaskentries[tid].revdeps)
458 for revdep in self.runtaskentries[tid].revdeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500459 if revdep in prev_chain:
460 idx = prev_chain.index(revdep)
461 # To prevent duplicates, reorder the chain to start with the lowest taskid
462 # and search through an array of those we've already printed
463 chain = prev_chain[idx:]
464 new_chain = chain_reorder(chain)
465 if not chain_array_contains(new_chain, valid_chains):
466 valid_chains.append(new_chain)
467 msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
468 for dep in new_chain:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600469 msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500470 msgs.append("\n")
471 if len(valid_chains) > 10:
472 msgs.append("Aborted dependency loops search after 10 matches.\n")
Andrew Geissler99467da2019-02-25 18:54:23 -0600473 raise TooManyLoops
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500474 continue
475 scan = False
476 if revdep not in explored_deps:
477 scan = True
478 elif revdep in explored_deps[revdep]:
479 scan = True
480 else:
481 for dep in prev_chain:
482 if dep in explored_deps[revdep]:
483 scan = True
484 if scan:
485 find_chains(revdep, copy.deepcopy(prev_chain))
486 for dep in explored_deps[revdep]:
487 if dep not in total_deps:
488 total_deps.append(dep)
489
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600490 explored_deps[tid] = total_deps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500491
Andrew Geissler99467da2019-02-25 18:54:23 -0600492 try:
493 for task in tasks:
494 find_chains(task, [])
495 except TooManyLoops:
496 pass
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500497
498 return msgs
499
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints: tids with no reverse dependencies (leaf tasks); the walk
        propagates weight from these back towards the roots.
        Returns the tid -> weight dict; also stores each weight on the
        corresponding runtaskentries entry.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Endpoints are seeded with a larger weight (10) before propagation.
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Reverse-topological sweep: each task's weight accumulates the
        # weight of everything that depends on it; a task joins the next
        # frontier once all of its reverse dependencies were processed.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        # Any task never marked done (or with revdeps unaccounted for) was
        # unreachable by the sweep, i.e. it sits on a dependency cycle.
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
559
560 def prepare(self):
561 """
562 Turn a set of taskData into a RunQueue and compute data needed
563 to optimise the execution order.
564 """
565
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600566 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500567 recursivetasks = {}
568 recursiveitasks = {}
569 recursivetasksselfref = set()
570
571 taskData = self.taskData
572
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600573 found = False
574 for mc in self.taskData:
575 if len(taskData[mc].taskentries) > 0:
576 found = True
577 break
578 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500579 # Nothing to do
580 return 0
581
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600582 self.init_progress_reporter.start()
583 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500584
585 # Step A - Work out a list of tasks to run
586 #
587 # Taskdata gives us a list of possible providers for every build and run
588 # target ordered by priority. It also gives information on each of those
589 # providers.
590 #
591 # To create the actual list of tasks to execute we fix the list of
592 # providers and then resolve the dependencies into task IDs. This
593 # process is repeated for each type of dependency (tdepends, deptask,
594 # rdeptast, recrdeptask, idepends).
595
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 def add_build_dependencies(depids, tasknames, depends, mc):
597 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500598 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600599 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500600 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600601 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500602 if depdata is None:
603 continue
604 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600605 t = depdata + ":" + taskname
606 if t in taskData[mc].taskentries:
607 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500608
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600609 def add_runtime_dependencies(depids, tasknames, depends, mc):
610 for depname in depids:
611 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500612 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600613 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614 if depdata is None:
615 continue
616 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600617 t = depdata + ":" + taskname
618 if t in taskData[mc].taskentries:
619 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500620
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800621 def add_mc_dependencies(mc, tid):
622 mcdeps = taskData[mc].get_mcdepends()
623 for dep in mcdeps:
624 mcdependency = dep.split(':')
625 pn = mcdependency[3]
626 frommc = mcdependency[1]
627 mcdep = mcdependency[2]
628 deptask = mcdependency[4]
629 if mc == frommc:
630 fn = taskData[mcdep].build_targets[pn][0]
631 newdep = '%s:%s' % (fn,deptask)
632 taskData[mc].taskentries[tid].tdepends.append(newdep)
633
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600634 for mc in taskData:
635 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500636
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600637 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
638 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500639
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600640 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
641
642 depends = set()
643 task_deps = self.dataCaches[mc].task_deps[taskfn]
644
645 self.runtaskentries[tid] = RunTaskEntry()
646
647 if fn in taskData[mc].failed_fns:
648 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800650 # We add multiconfig dependencies before processing internal task deps (tdepends)
651 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
652 add_mc_dependencies(mc, tid)
653
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500654 # Resolve task internal dependencies
655 #
656 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600657 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800658 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
659 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500660
661 # Resolve 'deptask' dependencies
662 #
663 # e.g. do_sometask[deptask] = "do_someothertask"
664 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600665 if 'deptask' in task_deps and taskname in task_deps['deptask']:
666 tasknames = task_deps['deptask'][taskname].split()
667 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
669 # Resolve 'rdeptask' dependencies
670 #
671 # e.g. do_sometask[rdeptask] = "do_someothertask"
672 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600673 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
674 tasknames = task_deps['rdeptask'][taskname].split()
675 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500676
677 # Resolve inter-task dependencies
678 #
679 # e.g. do_sometask[depends] = "targetname:do_someothertask"
680 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600681 idepends = taskData[mc].taskentries[tid].idepends
682 for (depname, idependtask) in idepends:
683 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500684 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600685 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500686 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600687 t = depdata + ":" + idependtask
688 depends.add(t)
689 if t not in taskData[mc].taskentries:
690 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
691 irdepends = taskData[mc].taskentries[tid].irdepends
692 for (depname, idependtask) in irdepends:
693 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500694 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500695 if not taskData[mc].run_targets[depname]:
696 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600697 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600699 t = depdata + ":" + idependtask
700 depends.add(t)
701 if t not in taskData[mc].taskentries:
702 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500703
704 # Resolve recursive 'recrdeptask' dependencies (Part A)
705 #
706 # e.g. do_sometask[recrdeptask] = "do_someothertask"
707 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
708 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600709 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
710 tasknames = task_deps['recrdeptask'][taskname].split()
711 recursivetasks[tid] = tasknames
712 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
713 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
714 if taskname in tasknames:
715 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500716
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600717 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
718 recursiveitasks[tid] = []
719 for t in task_deps['recideptask'][taskname].split():
720 newdep = build_tid(mc, fn, t)
721 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500722
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600723 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400724 # Remove all self references
725 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500726
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600727 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500728
Brad Bishop316dfdd2018-06-25 12:45:53 -0400729 self.init_progress_reporter.next_stage()
730
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500731 # Resolve recursive 'recrdeptask' dependencies (Part B)
732 #
733 # e.g. do_sometask[recrdeptask] = "do_someothertask"
734 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600735 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600736
Brad Bishop316dfdd2018-06-25 12:45:53 -0400737 # Generating/iterating recursive lists of dependencies is painful and potentially slow
738 # Precompute recursive task dependencies here by:
739 # a) create a temp list of reverse dependencies (revdeps)
740 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
741 # c) combine the total list of dependencies in cumulativedeps
742 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500743
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500744
Brad Bishop316dfdd2018-06-25 12:45:53 -0400745 revdeps = {}
746 deps = {}
747 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600748 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400749 deps[tid] = set(self.runtaskentries[tid].depends)
750 revdeps[tid] = set()
751 cumulativedeps[tid] = set()
752 # Generate a temp list of reverse dependencies
753 for tid in self.runtaskentries:
754 for dep in self.runtaskentries[tid].depends:
755 revdeps[dep].add(tid)
756 # Find the dependency chain endpoints
757 endpoints = set()
758 for tid in self.runtaskentries:
759 if len(deps[tid]) == 0:
760 endpoints.add(tid)
761 # Iterate the chains collating dependencies
762 while endpoints:
763 next = set()
764 for tid in endpoints:
765 for dep in revdeps[tid]:
766 cumulativedeps[dep].add(fn_from_tid(tid))
767 cumulativedeps[dep].update(cumulativedeps[tid])
768 if tid in deps[dep]:
769 deps[dep].remove(tid)
770 if len(deps[dep]) == 0:
771 next.add(dep)
772 endpoints = next
773 #for tid in deps:
774 # if len(deps[tid]) != 0:
775 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
776
777 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
778 # resolve these recursively until we aren't adding any further extra dependencies
779 extradeps = True
780 while extradeps:
781 extradeps = 0
782 for tid in recursivetasks:
783 tasknames = recursivetasks[tid]
784
785 totaldeps = set(self.runtaskentries[tid].depends)
786 if tid in recursiveitasks:
787 totaldeps.update(recursiveitasks[tid])
788 for dep in recursiveitasks[tid]:
789 if dep not in self.runtaskentries:
790 continue
791 totaldeps.update(self.runtaskentries[dep].depends)
792
793 deps = set()
794 for dep in totaldeps:
795 if dep in cumulativedeps:
796 deps.update(cumulativedeps[dep])
797
798 for t in deps:
799 for taskname in tasknames:
800 newtid = t + ":" + taskname
801 if newtid == tid:
802 continue
803 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
804 extradeps += 1
805 self.runtaskentries[tid].depends.add(newtid)
806
807 # Handle recursive tasks which depend upon other recursive tasks
808 deps = set()
809 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
810 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
811 for newtid in deps:
812 for taskname in tasknames:
813 if not newtid.endswith(":" + taskname):
814 continue
815 if newtid in self.runtaskentries:
816 extradeps += 1
817 self.runtaskentries[tid].depends.add(newtid)
818
819 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
820
821 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
822 for tid in recursivetasksselfref:
823 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600824
825 self.init_progress_reporter.next_stage()
826
827 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500828
829 # Step B - Mark all active tasks
830 #
831 # Start with the tasks we were asked to run and mark all dependencies
832 # as active too. If the task is to be 'forced', clear its stamp. Once
833 # all active tasks are marked, prune the ones we don't need.
834
835 logger.verbose("Marking Active Tasks")
836
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600837 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500838 """
839 Mark an item as active along with its depends
840 (calls itself recursively)
841 """
842
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600843 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500844 return
845
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600846 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500847
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600848 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849 for depend in depends:
850 mark_active(depend, depth+1)
851
Brad Bishop79641f22019-09-10 07:20:22 -0400852 def invalidate_task(tid, error_nostamp):
853 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
854 taskdep = self.dataCaches[mc].task_deps[taskfn]
855 if fn + ":" + taskname not in taskData[mc].taskentries:
856 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
857 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
858 if error_nostamp:
859 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
860 else:
861 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
862 else:
863 logger.verbose("Invalidate task %s, %s", taskname, fn)
864 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
865
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600866 self.target_tids = []
867 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500868
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600869 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500870 continue
871
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600872 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500873 continue
874
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500875 parents = False
876 if task.endswith('-'):
877 parents = True
878 task = task[:-1]
879
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600880 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500881 continue
882
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600883 # fn already has mc prefix
884 tid = fn + ":" + task
885 self.target_tids.append(tid)
886 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500887 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600888 tasks = []
889 for x in taskData[mc].taskentries:
890 if x.startswith(fn + ":"):
891 tasks.append(taskname_from_tid(x))
892 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500893 if close_matches:
894 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
895 else:
896 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600897 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
898
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500899 # For tasks called "XXXX-", only run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500900 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600901 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500902 mark_active(i, 1)
903 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600904 mark_active(tid, 1)
905
906 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500907
908 # Step C - Prune all inactive tasks
909 #
910 # Once all active tasks are marked, prune the ones we don't need.
911
Brad Bishop316dfdd2018-06-25 12:45:53 -0400912 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600913 for tid in list(self.runtaskentries.keys()):
914 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400915 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600916 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600917
Brad Bishop316dfdd2018-06-25 12:45:53 -0400918 # Handle --runall
919 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500920 # re-run the mark_active and then drop unused tasks from new list
921 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400922
923 for task in self.cooker.configuration.runall:
924 runall_tids = set()
925 for tid in list(self.runtaskentries):
926 wanttid = fn_from_tid(tid) + ":do_%s" % task
927 if wanttid in delcount:
928 self.runtaskentries[wanttid] = delcount[wanttid]
929 if wanttid in self.runtaskentries:
930 runall_tids.add(wanttid)
931
932 for tid in list(runall_tids):
933 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400934 if self.cooker.configuration.force:
935 invalidate_task(tid, False)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500936
937 for tid in list(self.runtaskentries.keys()):
938 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400939 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500940 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500941
942 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400943 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
944
945 self.init_progress_reporter.next_stage()
946
947 # Handle runonly
948 if self.cooker.configuration.runonly:
949 # re-run the mark_active and then drop unused tasks from new list
950 runq_build = {}
951
952 for task in self.cooker.configuration.runonly:
953 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
954
955 for tid in list(runonly_tids):
956 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400957 if self.cooker.configuration.force:
958 invalidate_task(tid, False)
Brad Bishop316dfdd2018-06-25 12:45:53 -0400959
960 for tid in list(self.runtaskentries.keys()):
961 if tid not in runq_build:
962 delcount[tid] = self.runtaskentries[tid]
963 del self.runtaskentries[tid]
964
965 if len(self.runtaskentries) == 0:
966 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500967
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500968 #
969 # Step D - Sanity checks and computation
970 #
971
972 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600973 if len(self.runtaskentries) == 0:
974 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500975 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
976 else:
977 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
978
Brad Bishop316dfdd2018-06-25 12:45:53 -0400979 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500980
981 logger.verbose("Assign Weightings")
982
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600983 self.init_progress_reporter.next_stage()
984
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600986 for tid in self.runtaskentries:
987 for dep in self.runtaskentries[tid].depends:
988 self.runtaskentries[dep].revdeps.add(tid)
989
990 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500991
992 # Identify tasks at the end of dependency chains
993 # Error on circular dependency loops (length two)
994 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600995 for tid in self.runtaskentries:
996 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500997 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600998 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500999 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001000 if dep in self.runtaskentries[tid].depends:
1001 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
1002
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001003
1004 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
1005
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001006 self.init_progress_reporter.next_stage()
1007
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001008 # Calculate task weights
1009 # Check for higher length circular dependencies
1010 self.runq_weight = self.calculate_task_weights(endpoints)
1011
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001012 self.init_progress_reporter.next_stage()
1013
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001014 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001015 for mc in self.dataCaches:
1016 prov_list = {}
1017 seen_fn = []
1018 for tid in self.runtaskentries:
1019 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1020 if taskfn in seen_fn:
1021 continue
1022 if mc != tidmc:
1023 continue
1024 seen_fn.append(taskfn)
1025 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1026 if prov not in prov_list:
1027 prov_list[prov] = [taskfn]
1028 elif taskfn not in prov_list[prov]:
1029 prov_list[prov].append(taskfn)
1030 for prov in prov_list:
1031 if len(prov_list[prov]) < 2:
1032 continue
1033 if prov in self.multi_provider_whitelist:
1034 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001035 seen_pn = []
1036 # If two versions of the same PN are being built its fatal, we don't support it.
1037 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001038 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001039 if pn not in seen_pn:
1040 seen_pn.append(pn)
1041 else:
1042 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001043 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1044 #
1045 # Construct a list of things which uniquely depend on each provider
1046 # since this may help the user figure out which dependency is triggering this warning
1047 #
1048 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1049 deplist = {}
1050 commondeps = None
1051 for provfn in prov_list[prov]:
1052 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001053 for tid in self.runtaskentries:
1054 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001055 if fn != provfn:
1056 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001057 for dep in self.runtaskentries[tid].revdeps:
1058 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001059 if fn == provfn:
1060 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001062 if not commondeps:
1063 commondeps = set(deps)
1064 else:
1065 commondeps &= deps
1066 deplist[provfn] = deps
1067 for provfn in deplist:
1068 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1069 #
1070 # Construct a list of provides and runtime providers for each recipe
1071 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1072 #
1073 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1074 provide_results = {}
1075 rprovide_results = {}
1076 commonprovs = None
1077 commonrprovs = None
1078 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001079 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001080 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001081 for rprovide in self.dataCaches[mc].rproviders:
1082 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001083 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001084 for package in self.dataCaches[mc].packages:
1085 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001086 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001087 for package in self.dataCaches[mc].packages_dynamic:
1088 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001089 rprovides.add(package)
1090 if not commonprovs:
1091 commonprovs = set(provides)
1092 else:
1093 commonprovs &= provides
1094 provide_results[provfn] = provides
1095 if not commonrprovs:
1096 commonrprovs = set(rprovides)
1097 else:
1098 commonrprovs &= rprovides
1099 rprovide_results[provfn] = rprovides
1100 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1101 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1102 for provfn in prov_list[prov]:
1103 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1104 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1105
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001106 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001107 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001108 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 logger.error(msg)
1110
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001111 self.init_progress_reporter.next_stage()
1112
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001113 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001114 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001115 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001116 self.stampfnwhitelist[mc] = []
1117 for entry in self.stampwhitelist.split():
1118 if entry not in self.taskData[mc].build_targets:
1119 continue
1120 fn = self.taskData.build_targets[entry][0]
1121 self.stampfnwhitelist[mc].append(fn)
1122
1123 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001124
1125 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001126 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001127 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001128 for tid in self.runtaskentries:
1129 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001130 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001131 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001133 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001134
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001136
1137 # Invalidate task if force mode active
1138 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001139 for tid in self.target_tids:
1140 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001141
1142 # Invalidate task if invalidate mode active
1143 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001144 for tid in self.target_tids:
1145 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001146 for st in self.cooker.configuration.invalidate_stamp.split(','):
1147 if not st.startswith("do_"):
1148 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001149 invalidate_task(fn + ":" + st, True)
1150
1151 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001152
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001153 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001154 for mc in taskData:
1155 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1156 virtpnmap = {}
1157 for v in virtmap:
1158 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1159 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1160 if hasattr(bb.parse.siggen, "tasks_resolved"):
1161 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1162
1163 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001164
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001165 # Iterate over the task list and call into the siggen code
1166 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001167 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001168 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001169 for tid in todeal.copy():
1170 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1171 dealtwith.add(tid)
1172 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001173 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001174
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001175 bb.parse.siggen.writeout_file_checksum_cache()
Brad Bishopa34c0302019-09-23 22:34:48 -04001176 bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001177
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001178 #self.dump_data()
1179 return len(self.runtaskentries)
1180
Brad Bishop19323692019-04-05 15:28:33 -04001181 def prepare_task_hash(self, tid):
1182 procdep = []
1183 for dep in self.runtaskentries[tid].depends:
Brad Bishop08902b02019-08-20 09:16:51 -04001184 procdep.append(dep)
1185 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.dataCaches[mc_from_tid(tid)])
1186 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001187
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001188 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001189 """
1190 Dump some debug information on the internal data structures
1191 """
1192 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001193 for tid in self.runtaskentries:
1194 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1195 self.runtaskentries[tid].weight,
1196 self.runtaskentries[tid].depends,
1197 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001198
class RunQueueWorker():
    # Simple container pairing a spawned bitbake-worker with its event pipe.
    # Created by RunQueue._start_worker() and shut down by
    # RunQueue._teardown_worker().
    def __init__(self, process, pipe):
        self.process = process  # the worker subprocess handle
        self.pipe = pipe        # runQueuePipe reading events from the worker's stdout
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001203
1204class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
        """Set up a RunQueue for building *targets*.

        The heavy lifting (task graph construction and dependency
        resolution) is delegated to the RunQueueData object created here;
        this object tracks execution state, worker processes and the disk
        space monitor.
        """
        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        # Build policy and validation hooks, all sourced from the metadata.
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneInit, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False

        # Worker process bookkeeping, keyed by multiconfig name.
        self.rqexe = None
        self.worker = {}
        self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001229
    def _start_worker(self, mc, fakeroot = False, rqexec = None):
        """Spawn one bitbake-worker subprocess for multiconfig *mc*.

        The worker is launched with a "magic" handshake token on its command
        line (varied to select profiling and/or fakeroot modes), then fed the
        cooker configuration, extra config data and per-multiconfig worker
        data over stdin as pickled, tag-delimited chunks.  Returns a
        RunQueueWorker wrapping the process and its readback pipe.
        """
        logger.debug(1, "Starting bitbake-worker")
        magic = "decafbad"
        if self.cooker.configuration.profile:
            magic = "decafbadbad"
        if fakeroot:
            magic = magic + "beef"
            mcdata = self.cooker.databuilder.mcdata[mc]
            # Run the worker under the fakeroot command with its required
            # environment merged into a copy of our own.
            fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
            env = os.environ.copy()
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        # Events come back asynchronously over the worker's stdout.
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)

        workerdata = {
            "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
            "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
            "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
            "sigdata" : bb.parse.siggen.get_taskdata(),
            "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME"),
            "date" : self.cfgData.getVar("DATE"),
            "time" : self.cfgData.getVar("TIME"),
            "hashservaddr" : self.cooker.hashservaddr,
        }

        # Handshake: the worker expects these three pickled sections, in this
        # order, before it starts accepting tasks.
        worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
        worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
        worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
        worker.stdin.flush()

        return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001272
    def _teardown_worker(self, worker):
        """Shut down a single RunQueueWorker, draining its event pipe.

        The order matters: send quit, then pump the pipe while polling until
        the process exits, then drain any remaining buffered events before
        closing the pipe.
        """
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        try:
            worker.process.stdin.write(b"<quit></quit>")
            worker.process.stdin.flush()
            worker.process.stdin.close()
        except IOError:
            # Worker may already have exited and closed its end of the pipe.
            pass
        while worker.process.returncode is None:
            worker.pipe.read()
            worker.process.poll()
        # Consume anything still queued in the pipe after process exit.
        while worker.pipe.read():
            continue
        worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001289
1290 def start_worker(self):
1291 if self.worker:
1292 self.teardown_workers()
1293 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001294 for mc in self.rqdata.dataCaches:
1295 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001296
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001297 def start_fakeworker(self, rqexec, mc):
1298 if not mc in self.fakeworker:
1299 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001300
1301 def teardown_workers(self):
1302 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001303 for mc in self.worker:
1304 self._teardown_worker(self.worker[mc])
1305 self.worker = {}
1306 for mc in self.fakeworker:
1307 self._teardown_worker(self.fakeworker[mc])
1308 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001309
1310 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001311 for mc in self.worker:
1312 self.worker[mc].pipe.read()
1313 for mc in self.fakeworker:
1314 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001315
1316 def active_fds(self):
1317 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001318 for mc in self.worker:
1319 fds.append(self.worker[mc].pipe.input)
1320 for mc in self.fakeworker:
1321 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001322 return fds
1323
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001324 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001325 def get_timestamp(f):
1326 try:
1327 if not os.access(f, os.F_OK):
1328 return None
1329 return os.stat(f)[stat.ST_MTIME]
1330 except:
1331 return None
1332
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001333 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1334 if taskname is None:
1335 taskname = tn
1336
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001337 if self.stamppolicy == "perfile":
1338 fulldeptree = False
1339 else:
1340 fulldeptree = True
1341 stampwhitelist = []
1342 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001343 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001344
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001345 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001346
1347 # If the stamp is missing, it's not current
1348 if not os.access(stampfile, os.F_OK):
1349 logger.debug(2, "Stampfile %s not available", stampfile)
1350 return False
1351 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001352 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001353 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1354 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1355 return False
1356
1357 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1358 return True
1359
1360 if cache is None:
1361 cache = {}
1362
1363 iscurrent = True
1364 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001365 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001366 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001367 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1368 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1369 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001370 t2 = get_timestamp(stampfile2)
1371 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001372 if t3 and not t2:
1373 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001374 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001375 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001376 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1377 if not t2:
1378 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1379 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001380 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001381 if t1 < t2:
1382 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1383 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001384 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001385 if recurse and iscurrent:
1386 if dep in cache:
1387 iscurrent = cache[dep]
1388 if not iscurrent:
1389 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1390 else:
1391 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1392 cache[dep] = iscurrent
1393 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001394 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001395 return iscurrent
1396
Brad Bishopa34c0302019-09-23 22:34:48 -04001397 def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False):
Brad Bishop96ff1982019-08-19 13:50:42 -04001398 valid = set()
1399 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001400 sq_data = {}
1401 sq_data['hash'] = {}
1402 sq_data['hashfn'] = {}
1403 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001404 for tid in tocheck:
1405 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001406 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1407 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1408 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001409
Brad Bishop08902b02019-08-20 09:16:51 -04001410 valid = self.validate_hash(sq_data, data, siginfo, currentcount)
Brad Bishop96ff1982019-08-19 13:50:42 -04001411
1412 return valid
1413
Brad Bishop08902b02019-08-20 09:16:51 -04001414 def validate_hash(self, sq_data, d, siginfo, currentcount):
1415 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
Brad Bishop19323692019-04-05 15:28:33 -04001416
Brad Bishop08902b02019-08-20 09:16:51 -04001417 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
1418 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
Brad Bishop19323692019-04-05 15:28:33 -04001419
Brad Bishop19323692019-04-05 15:28:33 -04001420 return bb.utils.better_eval(call, locs)
1421
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is one iteration of the runqueue state machine; it is called
        repeatedly by the caller until it returns False (complete) or raises.
        """

        retval = True

        # Stage 1: build the task graph.
        if self.state is runQueuePrepare:
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            # prepare() returns 0 when there is nothing to run.
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit

        # Stage 2: emit the dependency tree and register the disk monitor.
        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()

            # we are ready to run, emit dependency info to any UI or class which
            # needs it
            depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
            self.rqdata.init_progress_reporter.next_stage()
            bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

            # Register the disk-space monitor on heartbeat events exactly once;
            # the check only fires while the queue is running or cleaning up.
            if not self.dm_event_handler_registered:
                res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            # 'bitbake --dump-signatures': dumping replaces execution entirely.
            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete

        # Stage 3: start workers and the executor. Re-tested because the
        # signature-dump path above may have moved us to runQueueComplete.
        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()
            self.start_worker()
            self.rqdata.init_progress_reporter.next_stage()
            self.rqexe = RunQueueExecute(self)

            # If we don't have any setscene functions, skip execution
            if len(self.rqdata.runq_setscene_tids) == 0:
                logger.info('No setscene tasks')
                for tid in self.rqdata.runtaskentries:
                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
                        self.rqexe.setbuildable(tid)
                    self.rqexe.tasks_notcovered.add(tid)
                self.rqexe.sqdone = True
            logger.info('Executing Tasks')
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        # Deregister the heartbeat disk monitor once the build is over.
        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        # Persist unihash data, stop workers and print the summary.
        if build_done and self.rqexe:
            bb.parse.siggen.save_unitaskhashes()
            self.teardown_workers()
            if self.rqexe:
                if self.rqexe.stats.failed:
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
                else:
                    # Let's avoid the word "failed" if nothing actually did
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1519
1520 def execute_runqueue(self):
1521 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1522 try:
1523 return self._execute_runqueue()
1524 except bb.runqueue.TaskFailure:
1525 raise
1526 except SystemExit:
1527 raise
1528 except bb.BBHandledException:
1529 try:
1530 self.teardown_workers()
1531 except:
1532 pass
1533 self.state = runQueueComplete
1534 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001535 except Exception as err:
1536 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001537 try:
1538 self.teardown_workers()
1539 except:
1540 pass
1541 self.state = runQueueComplete
1542 raise
1543
1544 def finish_runqueue(self, now = False):
1545 if not self.rqexe:
1546 self.state = runQueueComplete
1547 return
1548
1549 if now:
1550 self.rqexe.finish_now()
1551 else:
1552 self.rqexe.finish()
1553
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001554 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001555 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001556 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1557 siggen = bb.parse.siggen
1558 dataCaches = self.rqdata.dataCaches
1559 siggen.dump_sigfn(fn, dataCaches, options)
1560
1561 def dump_signatures(self, options):
1562 fns = set()
1563 bb.note("Reparsing files to collect dependency data")
1564
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001565 for tid in self.rqdata.runtaskentries:
1566 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001567 fns.add(fn)
1568
1569 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1570 # We cannot use the real multiprocessing.Pool easily due to some local data
1571 # that can't be pickled. This is a cheap multi-process solution.
1572 launched = []
1573 while fns:
1574 if len(launched) < max_process:
1575 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1576 p.start()
1577 launched.append(p)
1578 for q in launched:
1579 # The finished processes are joined when calling is_alive()
1580 if not q.is_alive():
1581 launched.remove(q)
1582 for p in launched:
1583 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001584
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001585 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001586
1587 return
1588
1589 def print_diffscenetasks(self):
1590
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001591 noexec = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001592 tocheck = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001593
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001594 for tid in self.rqdata.runtaskentries:
1595 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1596 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001597
1598 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001599 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001600 continue
1601
Brad Bishop96ff1982019-08-19 13:50:42 -04001602 tocheck.add(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001603
Brad Bishopa34c0302019-09-23 22:34:48 -04001604 valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001605
1606 # Tasks which are both setscene and noexec never care about dependencies
1607 # We therefore find tasks which are setscene and noexec and mark their
1608 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001609 for tid in noexec:
1610 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001611 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001612 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001613 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001614 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1615 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001616 continue
1617 hasnoexecparents = False
1618 break
1619 if hasnoexecparents:
1620 valid_new.add(dep)
1621
1622 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001623 for tid in self.rqdata.runtaskentries:
1624 if tid not in valid_new and tid not in noexec:
1625 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001626
1627 found = set()
1628 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001629 for tid in invalidtasks:
1630 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001631 while toprocess:
1632 next = set()
1633 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001634 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001635 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001636 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001637 if dep not in processed:
1638 processed.add(dep)
1639 next.add(dep)
1640 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001641 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001642 toprocess = set()
1643
1644 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001645 for tid in invalidtasks.difference(found):
1646 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001647
1648 if tasklist:
1649 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1650
1651 return invalidtasks.difference(found)
1652
    def write_diffscenetasks(self, invalidtasks):
        """
        For each invalid task, locate the closest previously written sigdata
        file and print a recursive diff explaining why the cached result
        could not be used.
        """

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Compare the sigdata files for the two hashes of *key*,
            # recursing into mismatching dependencies.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            # All sigdata files written for this pn/taskname; one must carry
            # the hash we just computed.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop our own file, then diff against the most recent remaining one.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1687
Brad Bishop96ff1982019-08-19 13:50:42 -04001688
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001689class RunQueueExecute:
1690
    def __init__(self, rq):
        """
        Execution state for a prepared RunQueue: tracks the buildable/running/
        complete sets for both the real task queue and the setscene queue,
        wires the workers to this executor and builds the scenequeue data.
        """
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene queue state
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        # Taskhash migration bookkeeping (hashequiv)
        self.updated_taskhash_queue = []
        self.pending_migrations = set()

        # Main queue state
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamps of currently-building tasks (used to avoid duplicate builds)
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []
        self.sq_deferred = {}

        self.stampcache = {}

        self.holdoff_tasks = set()
        self.holdoff_need_update = True
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Route worker pipe events back into this executor.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
             bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # We can't skip specified target tasks which aren't setscene tasks
        self.cantskip = set(self.rqdata.target_tids)
        self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
        self.cantskip.intersection_update(self.rqdata.runtaskentries)

        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        # Scenequeue data is built unconditionally, even with no setscene tids.
        self.sqdata = SQData()
        build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001760
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001761 def runqueue_process_waitpid(self, task, status):
1762
1763 # self.build_stamps[pid] may not exist when use shared work directory.
1764 if task in self.build_stamps:
1765 self.build_stamps2.remove(self.build_stamps[task])
1766 del self.build_stamps[task]
1767
Brad Bishop96ff1982019-08-19 13:50:42 -04001768 if task in self.sq_live:
1769 if status != 0:
1770 self.sq_task_fail(task, status)
1771 else:
1772 self.sq_task_complete(task)
1773 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001774 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001775 if status != 0:
1776 self.task_fail(task, status)
1777 else:
1778 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001779 return True
1780
1781 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001782 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001783 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001784 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1785 self.rq.worker[mc].process.stdin.flush()
1786 except IOError:
1787 # worker must have died?
1788 pass
1789 for mc in self.rq.fakeworker:
1790 try:
1791 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1792 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001793 except IOError:
1794 # worker must have died?
1795 pass
1796
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001797 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001798 self.rq.state = runQueueFailed
1799 return
1800
1801 self.rq.state = runQueueComplete
1802 return
1803
1804 def finish(self):
1805 self.rq.state = runQueueCleanUp
1806
Brad Bishop96ff1982019-08-19 13:50:42 -04001807 active = self.stats.active + self.sq_stats.active
1808 if active > 0:
1809 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001810 self.rq.read_workers()
1811 return self.rq.active_fds()
1812
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001813 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001814 self.rq.state = runQueueFailed
1815 return True
1816
1817 self.rq.state = runQueueComplete
1818 return True
1819
Brad Bishop96ff1982019-08-19 13:50:42 -04001820 # Used by setscene only
1821 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001822 if not self.rq.depvalidate:
1823 return False
1824
Brad Bishop08902b02019-08-20 09:16:51 -04001825 # Must not edit parent data
1826 taskdeps = set(taskdeps)
1827
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001828 taskdata = {}
1829 taskdeps.add(task)
1830 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001831 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1832 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001833 taskdata[dep] = [pn, taskname, fn]
1834 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001835 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001836 valid = bb.utils.better_eval(call, locs)
1837 return valid
1838
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001839 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001840 active = self.stats.active + self.sq_stats.active
1841 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001842 return can_start
1843
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001844 def get_schedulers(self):
1845 schedulers = set(obj for obj in globals().values()
1846 if type(obj) is type and
1847 issubclass(obj, RunQueueScheduler))
1848
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001849 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001850 if user_schedulers:
1851 for sched in user_schedulers.split():
1852 if not "." in sched:
1853 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1854 continue
1855
1856 modname, name = sched.rsplit(".", 1)
1857 try:
1858 module = __import__(modname, fromlist=(name,))
1859 except ImportError as exc:
1860 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1861 raise SystemExit(1)
1862 else:
1863 schedulers.add(getattr(module, name))
1864 return schedulers
1865
    def setbuildable(self, task):
        # Mark the task ready to run and notify the scheduler.
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001869
1870 def task_completeoutright(self, task):
1871 """
1872 Mark a task as completed
1873 Look at the reverse dependencies and mark any task with
1874 completed dependencies as buildable
1875 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001876 self.runq_complete.add(task)
1877 for revdep in self.rqdata.runtaskentries[task].revdeps:
1878 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001879 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001880 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001881 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001882 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001883 for dep in self.rqdata.runtaskentries[revdep].depends:
1884 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001885 alldeps = False
1886 break
1887 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001888 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001889 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001890
    def task_complete(self, task):
        # Record success in the stats, notify listeners, then propagate
        # completion so dependent tasks can become buildable.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1895
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # '' is the default multiconfig; with abort set, stop scheduling new work.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1906
    def task_skip(self, task, reason):
        # A skipped task counts as both running and (immediately) complete so
        # the dependency bookkeeping stays consistent.
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001914
Brad Bishop08902b02019-08-20 09:16:51 -04001915 def summarise_scenequeue_errors(self):
1916 err = False
1917 if not self.sqdone:
1918 logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
1919 completeevent = sceneQueueComplete(self.sq_stats, self.rq)
1920 bb.event.fire(completeevent, self.cfgData)
1921 if self.sq_deferred:
1922 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
1923 err = True
1924 if self.updated_taskhash_queue:
1925 logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
1926 err = True
1927 if self.holdoff_tasks:
1928 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
1929 err = True
1930
1931 for tid in self.rqdata.runq_setscene_tids:
1932 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
1933 err = True
1934 logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
1935 if tid not in self.sq_buildable:
1936 err = True
1937 logger.error("Setscene Task %s was never marked as buildable" % tid)
1938 if tid not in self.sq_running:
1939 err = True
1940 logger.error("Setscene Task %s was never marked as running" % tid)
1941
1942 for x in self.rqdata.runtaskentries:
1943 if x not in self.tasks_covered and x not in self.tasks_notcovered:
1944 logger.error("Task %s was never moved from the setscene queue" % x)
1945 err = True
1946 if x not in self.tasks_scenequeue_done:
1947 logger.error("Task %s was never processed by the setscene code" % x)
1948 err = True
1949 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
1950 logger.error("Task %s was never marked as buildable by the setscene code" % x)
1951 err = True
1952 return err
1953
1954
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001955 def execute(self):
1956 """
Brad Bishop96ff1982019-08-19 13:50:42 -04001957 Run the tasks in a queue prepared by prepare_runqueue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001958 """
1959
1960 self.rq.read_workers()
Brad Bishop08902b02019-08-20 09:16:51 -04001961 self.process_possible_migrations()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001962
Brad Bishop96ff1982019-08-19 13:50:42 -04001963 task = None
1964 if not self.sqdone and self.can_start_task():
1965 # Find the next setscene to run
Brad Bishop08902b02019-08-20 09:16:51 -04001966 for nexttask in sorted(self.rqdata.runq_setscene_tids):
Brad Bishop96ff1982019-08-19 13:50:42 -04001967 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
1968 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
1969 if nexttask not in self.rqdata.target_tids:
1970 logger.debug(2, "Skipping setscene for task %s" % nexttask)
1971 self.sq_task_skip(nexttask)
1972 self.scenequeue_notneeded.add(nexttask)
1973 if nexttask in self.sq_deferred:
1974 del self.sq_deferred[nexttask]
1975 return True
Brad Bishop08902b02019-08-20 09:16:51 -04001976 # If covered tasks are running, need to wait for them to complete
1977 for t in self.sqdata.sq_covered_tasks[nexttask]:
1978 if t in self.runq_running and t not in self.runq_complete:
1979 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04001980 if nexttask in self.sq_deferred:
1981 if self.sq_deferred[nexttask] not in self.runq_complete:
1982 continue
1983 logger.debug(1, "Task %s no longer deferred" % nexttask)
1984 del self.sq_deferred[nexttask]
Brad Bishopa34c0302019-09-23 22:34:48 -04001985 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False)
Brad Bishop96ff1982019-08-19 13:50:42 -04001986 if not valid:
1987 logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
1988 self.sq_task_failoutright(nexttask)
1989 return True
1990 else:
1991 self.sqdata.outrightfail.remove(nexttask)
1992 if nexttask in self.sqdata.outrightfail:
1993 logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
1994 self.sq_task_failoutright(nexttask)
1995 return True
1996 if nexttask in self.sqdata.unskippable:
1997 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
1998 task = nexttask
1999 break
2000 if task is not None:
2001 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
2002 taskname = taskname + "_setscene"
2003 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
2004 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
2005 self.sq_task_failoutright(task)
2006 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002007
Brad Bishop96ff1982019-08-19 13:50:42 -04002008 if self.cooker.configuration.force:
2009 if task in self.rqdata.target_tids:
2010 self.sq_task_failoutright(task)
2011 return True
2012
2013 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2014 logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
2015 self.sq_task_skip(task)
2016 return True
2017
2018 if self.cooker.configuration.skipsetscene:
2019 logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
2020 self.sq_task_failoutright(task)
2021 return True
2022
2023 startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
2024 bb.event.fire(startevent, self.cfgData)
2025
2026 taskdepdata = self.sq_build_taskdepdata(task)
2027
2028 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2029 taskhash = self.rqdata.get_task_hash(task)
2030 unihash = self.rqdata.get_task_unihash(task)
2031 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
2032 if not mc in self.rq.fakeworker:
2033 self.rq.start_fakeworker(self, mc)
2034 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2035 self.rq.fakeworker[mc].process.stdin.flush()
2036 else:
2037 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2038 self.rq.worker[mc].process.stdin.flush()
2039
2040 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2041 self.build_stamps2.append(self.build_stamps[task])
2042 self.sq_running.add(task)
2043 self.sq_live.add(task)
2044 self.sq_stats.taskActive()
2045 if self.can_start_task():
2046 return True
2047
Brad Bishopc68388fc2019-08-26 01:33:31 -04002048 self.update_holdofftasks()
2049
Brad Bishop08902b02019-08-20 09:16:51 -04002050 if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
Brad Bishop96ff1982019-08-19 13:50:42 -04002051 logger.info("Setscene tasks completed")
Brad Bishop96ff1982019-08-19 13:50:42 -04002052
Brad Bishop08902b02019-08-20 09:16:51 -04002053 err = self.summarise_scenequeue_errors()
Brad Bishop96ff1982019-08-19 13:50:42 -04002054 if err:
2055 self.rq.state = runQueueFailed
2056 return True
2057
2058 if self.cooker.configuration.setsceneonly:
2059 self.rq.state = runQueueComplete
2060 return True
2061 self.sqdone = True
2062
2063 if self.stats.total == 0:
2064 # nothing to do
2065 self.rq.state = runQueueComplete
2066 return True
2067
2068 if self.cooker.configuration.setsceneonly:
2069 task = None
2070 else:
2071 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002072 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002073 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002074
Brad Bishop96ff1982019-08-19 13:50:42 -04002075 if self.rqdata.setscenewhitelist is not None:
2076 if self.check_setscenewhitelist(task):
2077 self.task_fail(task, "setscene whitelist")
2078 return True
2079
2080 if task in self.tasks_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002081 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002082 self.task_skip(task, "covered")
2083 return True
2084
2085 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002086 logger.debug(2, "Stamp current task %s", task)
2087
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002088 self.task_skip(task, "existing")
2089 return True
2090
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002091 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002092 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2093 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2094 noexec=True)
2095 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002096 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002097 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002098 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002099 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002100 self.task_complete(task)
2101 return True
2102 else:
2103 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2104 bb.event.fire(startevent, self.cfgData)
2105
2106 taskdepdata = self.build_taskdepdata(task)
2107
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002108 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002109 taskhash = self.rqdata.get_task_hash(task)
2110 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002111 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002112 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002113 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002114 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002115 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002116 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002117 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002118 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002119 return True
Brad Bishop19323692019-04-05 15:28:33 -04002120 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002121 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002122 else:
Brad Bishop19323692019-04-05 15:28:33 -04002123 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002124 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002125
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002126 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2127 self.build_stamps2.append(self.build_stamps[task])
2128 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002129 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002130 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002131 return True
2132
Brad Bishop96ff1982019-08-19 13:50:42 -04002133 if self.stats.active > 0 or self.sq_stats.active > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002134 self.rq.read_workers()
2135 return self.rq.active_fds()
2136
Brad Bishop96ff1982019-08-19 13:50:42 -04002137 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2138 if self.sq_deferred:
2139 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2140 logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
2141 self.sq_task_failoutright(tid)
2142 return True
2143
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002144 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002145 self.rq.state = runQueueFailed
2146 return True
2147
2148 # Sanity Checks
Brad Bishop08902b02019-08-20 09:16:51 -04002149 err = self.summarise_scenequeue_errors()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002150 for task in self.rqdata.runtaskentries:
2151 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002152 logger.error("Task %s never buildable!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002153 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002154 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002155 logger.error("Task %s never ran!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002156 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002157 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002158 logger.error("Task %s never completed!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002159 err = True
2160
2161 if err:
2162 self.rq.state = runQueueFailed
2163 else:
2164 self.rq.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002165
2166 return True
2167
Brad Bishopc68388fc2019-08-26 01:33:31 -04002168 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002169 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002170 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002171 thismc = mc_from_tid(dep)
2172 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002173 continue
2174 ret.add(dep)
2175 return ret
2176
Brad Bishopa34c0302019-09-23 22:34:48 -04002177 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
Andrew Geissler99467da2019-02-25 18:54:23 -06002178 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002179 def build_taskdepdata(self, task):
2180 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002181 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002182 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002183 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002184 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002185 while next:
2186 additional = []
2187 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002188 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2189 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2190 deps = self.rqdata.runtaskentries[revdep].depends
2191 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002192 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002193 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002194 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002195 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002196 for revdep2 in deps:
2197 if revdep2 not in taskdepdata:
2198 additional.append(revdep2)
2199 next = additional
2200
2201 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2202 return taskdepdata
2203
Brad Bishop08902b02019-08-20 09:16:51 -04002204 def update_holdofftasks(self):
Brad Bishopc68388fc2019-08-26 01:33:31 -04002205
2206 if not self.holdoff_need_update:
2207 return
2208
2209 notcovered = set(self.scenequeue_notcovered)
2210 notcovered |= self.cantskip
2211 for tid in self.scenequeue_notcovered:
2212 notcovered |= self.sqdata.sq_covered_tasks[tid]
2213 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2214 notcovered.intersection_update(self.tasks_scenequeue_done)
2215
2216 covered = set(self.scenequeue_covered)
2217 for tid in self.scenequeue_covered:
2218 covered |= self.sqdata.sq_covered_tasks[tid]
2219 covered.difference_update(notcovered)
2220 covered.intersection_update(self.tasks_scenequeue_done)
2221
2222 for tid in notcovered | covered:
2223 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2224 self.setbuildable(tid)
2225 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2226 self.setbuildable(tid)
2227
2228 self.tasks_covered = covered
2229 self.tasks_notcovered = notcovered
2230
Brad Bishop08902b02019-08-20 09:16:51 -04002231 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002232
Brad Bishop08902b02019-08-20 09:16:51 -04002233 for tid in self.rqdata.runq_setscene_tids:
2234 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2235 self.holdoff_tasks.add(tid)
2236
2237 for tid in self.holdoff_tasks.copy():
2238 for dep in self.sqdata.sq_covered_tasks[tid]:
2239 if dep not in self.runq_complete:
2240 self.holdoff_tasks.add(dep)
2241
Brad Bishopc68388fc2019-08-26 01:33:31 -04002242 self.holdoff_need_update = False
2243
Brad Bishop08902b02019-08-20 09:16:51 -04002244 def process_possible_migrations(self):
2245
2246 changed = set()
2247 for tid, unihash in self.updated_taskhash_queue.copy():
2248 if tid in self.runq_running and tid not in self.runq_complete:
2249 continue
2250
2251 self.updated_taskhash_queue.remove((tid, unihash))
2252
2253 if unihash != self.rqdata.runtaskentries[tid].unihash:
2254 logger.info("Task %s unihash changed to %s" % (tid, unihash))
2255 self.rqdata.runtaskentries[tid].unihash = unihash
2256 bb.parse.siggen.set_unihash(tid, unihash)
2257
2258 # Work out all tasks which depend on this one
2259 total = set()
2260 next = set(self.rqdata.runtaskentries[tid].revdeps)
2261 while next:
2262 current = next.copy()
2263 total = total |next
2264 next = set()
2265 for ntid in current:
2266 next |= self.rqdata.runtaskentries[ntid].revdeps
2267 next.difference_update(total)
2268
2269 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2270 done = set()
2271 next = set(self.rqdata.runtaskentries[tid].revdeps)
2272 while next:
2273 current = next.copy()
2274 next = set()
2275 for tid in current:
2276 if not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2277 continue
2278 procdep = []
2279 for dep in self.rqdata.runtaskentries[tid].depends:
2280 procdep.append(dep)
2281 orighash = self.rqdata.runtaskentries[tid].hash
2282 self.rqdata.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.rqdata.dataCaches[mc_from_tid(tid)])
2283 origuni = self.rqdata.runtaskentries[tid].unihash
2284 self.rqdata.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
2285 logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, self.rqdata.runtaskentries[tid].hash, origuni, self.rqdata.runtaskentries[tid].unihash))
2286 next |= self.rqdata.runtaskentries[tid].revdeps
2287 changed.add(tid)
2288 total.remove(tid)
2289 next.intersection_update(total)
2290
2291 if changed:
2292 for mc in self.rq.worker:
2293 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2294 for mc in self.rq.fakeworker:
2295 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2296
2297 logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))
2298
2299 for tid in changed:
2300 if tid not in self.rqdata.runq_setscene_tids:
2301 continue
2302 valid = self.rq.validate_hashes(set([tid]), self.cooker.data, None, False)
2303 if not valid:
2304 continue
2305 if tid in self.runq_running:
2306 continue
2307 if tid not in self.pending_migrations:
2308 self.pending_migrations.add(tid)
2309
2310 for tid in self.pending_migrations.copy():
2311 valid = True
2312 # Check no tasks this covers are running
2313 for dep in self.sqdata.sq_covered_tasks[tid]:
2314 if dep in self.runq_running and dep not in self.runq_complete:
2315 logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
2316 valid = False
2317 break
2318 if not valid:
2319 continue
2320
2321 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002322 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002323
2324 if tid in self.tasks_scenequeue_done:
2325 self.tasks_scenequeue_done.remove(tid)
2326 for dep in self.sqdata.sq_covered_tasks[tid]:
2327 if dep not in self.runq_complete:
2328 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2329 self.tasks_scenequeue_done.remove(dep)
2330
2331 if tid in self.sq_buildable:
2332 self.sq_buildable.remove(tid)
2333 if tid in self.sq_running:
2334 self.sq_running.remove(tid)
2335 if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2336 if tid not in self.sq_buildable:
2337 self.sq_buildable.add(tid)
2338 if len(self.sqdata.sq_revdeps[tid]) == 0:
2339 self.sq_buildable.add(tid)
2340
2341 if tid in self.sqdata.outrightfail:
2342 self.sqdata.outrightfail.remove(tid)
2343 if tid in self.scenequeue_notcovered:
2344 self.scenequeue_notcovered.remove(tid)
2345 if tid in self.scenequeue_covered:
2346 self.scenequeue_covered.remove(tid)
2347 if tid in self.scenequeue_notneeded:
2348 self.scenequeue_notneeded.remove(tid)
2349
2350 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2351 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2352
2353 if tid in self.stampcache:
2354 del self.stampcache[tid]
2355
2356 if tid in self.build_stamps:
2357 del self.build_stamps[tid]
2358
2359 logger.info("Setscene task %s now valid and being rerun" % tid)
2360 self.sqdone = False
2361
2362 if changed:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002363 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002364
Brad Bishop96ff1982019-08-19 13:50:42 -04002365 def scenequeue_updatecounters(self, task, fail=False):
Brad Bishop08902b02019-08-20 09:16:51 -04002366
2367 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002368 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002369 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002370 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002371 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002372 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2373 if dep not in self.sq_buildable:
2374 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002375
Brad Bishop96ff1982019-08-19 13:50:42 -04002376 next = set([task])
2377 while next:
2378 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002379 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002380 self.tasks_scenequeue_done.add(t)
2381 # Look down the dependency chain for non-setscene things which this task depends on
2382 # and mark as 'done'
2383 for dep in self.rqdata.runtaskentries[t].depends:
2384 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2385 continue
2386 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2387 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002388 next = new
2389
Brad Bishopc68388fc2019-08-26 01:33:31 -04002390 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002391
    def sq_task_completeoutright(self, task):
        """
        Mark setscene *task* as successfully completed (covered).

        Adds it to scenequeue_covered, then lets scenequeue_updatecounters
        make any now-satisfied dependent tasks buildable.
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)
2402
Brad Bishop96ff1982019-08-19 13:50:42 -04002403 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002404 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002405 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002406 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2407 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002408 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2409 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2410 self.rq.state = runQueueCleanUp
2411
Brad Bishop96ff1982019-08-19 13:50:42 -04002412 def sq_task_complete(self, task):
2413 self.sq_stats.taskCompleted()
2414 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2415 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002416
Brad Bishop96ff1982019-08-19 13:50:42 -04002417 def sq_task_fail(self, task, result):
2418 self.sq_stats.taskFailed()
2419 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002420 self.scenequeue_notcovered.add(task)
2421 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002422 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002423
Brad Bishop96ff1982019-08-19 13:50:42 -04002424 def sq_task_failoutright(self, task):
2425 self.sq_running.add(task)
2426 self.sq_buildable.add(task)
2427 self.sq_stats.taskSkipped()
2428 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002429 self.scenequeue_notcovered.add(task)
2430 self.scenequeue_updatecounters(task, True)
2431
Brad Bishop96ff1982019-08-19 13:50:42 -04002432 def sq_task_skip(self, task):
2433 self.sq_running.add(task)
2434 self.sq_buildable.add(task)
2435 self.sq_task_completeoutright(task)
2436 self.sq_stats.taskSkipped()
2437 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002438
Brad Bishop96ff1982019-08-19 13:50:42 -04002439 def sq_build_taskdepdata(self, task):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002440 def getsetscenedeps(tid):
2441 deps = set()
2442 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2443 realtid = tid + "_setscene"
2444 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2445 for (depname, idependtask) in idepends:
2446 if depname not in self.rqdata.taskData[mc].build_targets:
2447 continue
2448
2449 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2450 if depfn is None:
2451 continue
2452 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2453 deps.add(deptid)
2454 return deps
2455
2456 taskdepdata = {}
2457 next = getsetscenedeps(task)
2458 next.add(task)
2459 while next:
2460 additional = []
2461 for revdep in next:
2462 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2463 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2464 deps = getsetscenedeps(revdep)
2465 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2466 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002467 unihash = self.rqdata.runtaskentries[revdep].unihash
2468 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002469 for revdep2 in deps:
2470 if revdep2 not in taskdepdata:
2471 additional.append(revdep2)
2472 next = additional
2473
2474 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2475 return taskdepdata
2476
Brad Bishop96ff1982019-08-19 13:50:42 -04002477 def check_setscenewhitelist(self, tid):
2478 # Check task that is going to run against the whitelist
2479 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2480 # Ignore covered tasks
2481 if tid in self.tasks_covered:
2482 return False
2483 # Ignore stamped tasks
2484 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2485 return False
2486 # Ignore noexec tasks
2487 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2488 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2489 return False
2490
2491 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2492 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2493 if tid in self.rqdata.runq_setscene_tids:
2494 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2495 else:
2496 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
2497 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2498 return True
2499 return False
2500
class SQData(object):
    """Plain container for the scenequeue (setscene/sstate) data structures,
    populated by build_scenequeue_data()."""
    def __init__(self):
        # Maps a setscene tid to the setscene tids that depend on it
        self.sq_deps = {}
        # Maps a setscene tid to the setscene tids it depends upon
        self.sq_revdeps = {}
        # Inter-setscene dependencies injected via [depends] on _setscene tasks
        self.sq_harddeps = {}
        # Stamp file per setscene tid, used to stop duplicates running in parallel
        self.stamps = {}
        # Setscene tids the build directly depends upon (cannot be skipped)
        self.unskippable = set()
        # Setscene tids known to have no artefact available
        self.outrightfail = set()
        # Maps a setscene tid to the normal-task tids it covers
        self.sq_covered_tasks = {}
2517
2518def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
2519
2520 sq_revdeps = {}
2521 sq_revdeps_squash = {}
2522 sq_collated_deps = {}
2523
2524 # We need to construct a dependency graph for the setscene functions. Intermediate
2525 # dependencies between the setscene tasks only complicate the code. This code
2526 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2527 # only containing the setscene functions.
2528
2529 rqdata.init_progress_reporter.next_stage()
2530
2531 # First process the chains up to the first setscene task.
2532 endpoints = {}
2533 for tid in rqdata.runtaskentries:
2534 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2535 sq_revdeps_squash[tid] = set()
2536 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2537 #bb.warn("Added endpoint %s" % (tid))
2538 endpoints[tid] = set()
2539
2540 rqdata.init_progress_reporter.next_stage()
2541
2542 # Secondly process the chains between setscene tasks.
2543 for tid in rqdata.runq_setscene_tids:
2544 sq_collated_deps[tid] = set()
2545 #bb.warn("Added endpoint 2 %s" % (tid))
2546 for dep in rqdata.runtaskentries[tid].depends:
2547 if tid in sq_revdeps[dep]:
2548 sq_revdeps[dep].remove(tid)
2549 if dep not in endpoints:
2550 endpoints[dep] = set()
2551 #bb.warn(" Added endpoint 3 %s" % (dep))
2552 endpoints[dep].add(tid)
2553
2554 rqdata.init_progress_reporter.next_stage()
2555
2556 def process_endpoints(endpoints):
2557 newendpoints = {}
2558 for point, task in endpoints.items():
2559 tasks = set()
2560 if task:
2561 tasks |= task
2562 if sq_revdeps_squash[point]:
2563 tasks |= sq_revdeps_squash[point]
2564 if point not in rqdata.runq_setscene_tids:
2565 for t in tasks:
2566 sq_collated_deps[t].add(point)
2567 sq_revdeps_squash[point] = set()
2568 if point in rqdata.runq_setscene_tids:
2569 sq_revdeps_squash[point] = tasks
2570 tasks = set()
2571 continue
2572 for dep in rqdata.runtaskentries[point].depends:
2573 if point in sq_revdeps[dep]:
2574 sq_revdeps[dep].remove(point)
2575 if tasks:
2576 sq_revdeps_squash[dep] |= tasks
2577 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2578 newendpoints[dep] = task
2579 if len(newendpoints) != 0:
2580 process_endpoints(newendpoints)
2581
2582 process_endpoints(endpoints)
2583
2584 rqdata.init_progress_reporter.next_stage()
2585
Brad Bishop08902b02019-08-20 09:16:51 -04002586 # Build a list of tasks which are "unskippable"
2587 # These are direct endpoints referenced by the build upto and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002588 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2589 new = True
2590 for tid in rqdata.runtaskentries:
2591 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2592 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002593 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002594 while new:
2595 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002596 orig = sqdata.unskippable.copy()
2597 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002598 if tid in rqdata.runq_setscene_tids:
2599 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002600 if len(rqdata.runtaskentries[tid].depends) == 0:
2601 # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002602 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002603 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002604 if sqdata.unskippable != orig:
2605 new = True
2606
2607 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002608
2609 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2610
2611 # Sanity check all dependencies could be changed to setscene task references
2612 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2613 if tid in rqdata.runq_setscene_tids:
2614 pass
2615 elif len(sq_revdeps_squash[tid]) != 0:
2616 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2617 else:
2618 del sq_revdeps_squash[tid]
2619 rqdata.init_progress_reporter.update(taskcounter)
2620
2621 rqdata.init_progress_reporter.next_stage()
2622
2623 # Resolve setscene inter-task dependencies
2624 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2625 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2626 for tid in rqdata.runq_setscene_tids:
2627 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2628 realtid = tid + "_setscene"
2629 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2630 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2631 for (depname, idependtask) in idepends:
2632
2633 if depname not in rqdata.taskData[mc].build_targets:
2634 continue
2635
2636 depfn = rqdata.taskData[mc].build_targets[depname][0]
2637 if depfn is None:
2638 continue
2639 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2640 if deptid not in rqdata.runtaskentries:
2641 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2642
2643 if not deptid in sqdata.sq_harddeps:
2644 sqdata.sq_harddeps[deptid] = set()
2645 sqdata.sq_harddeps[deptid].add(tid)
2646
2647 sq_revdeps_squash[tid].add(deptid)
2648 # Have to zero this to avoid circular dependencies
2649 sq_revdeps_squash[deptid] = set()
2650
2651 rqdata.init_progress_reporter.next_stage()
2652
2653 for task in sqdata.sq_harddeps:
2654 for dep in sqdata.sq_harddeps[task]:
2655 sq_revdeps_squash[dep].add(task)
2656
2657 rqdata.init_progress_reporter.next_stage()
2658
2659 #for tid in sq_revdeps_squash:
2660 # data = ""
2661 # for dep in sq_revdeps_squash[tid]:
2662 # data = data + "\n %s" % dep
2663 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2664
2665 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002666 sqdata.sq_covered_tasks = sq_collated_deps
2667
2668 # Build reverse version of revdeps to populate deps structure
2669 for tid in sqdata.sq_revdeps:
2670 sqdata.sq_deps[tid] = set()
2671 for tid in sqdata.sq_revdeps:
2672 for dep in sqdata.sq_revdeps[tid]:
2673 sqdata.sq_deps[dep].add(tid)
2674
2675 rqdata.init_progress_reporter.next_stage()
2676
2677 multiconfigs = set()
2678 for tid in sqdata.sq_revdeps:
2679 multiconfigs.add(mc_from_tid(tid))
2680 if len(sqdata.sq_revdeps[tid]) == 0:
2681 sqrq.sq_buildable.add(tid)
2682
2683 rqdata.init_progress_reporter.finish()
2684
2685 if rq.hashvalidate:
2686 noexec = []
2687 stamppresent = []
2688 tocheck = set()
2689
Brad Bishop08902b02019-08-20 09:16:51 -04002690 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002691 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2692
2693 taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
2694
2695 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2696 noexec.append(tid)
2697 sqrq.sq_task_skip(tid)
2698 bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
2699 continue
2700
2701 if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
2702 logger.debug(2, 'Setscene stamp current for task %s', tid)
2703 stamppresent.append(tid)
2704 sqrq.sq_task_skip(tid)
2705 continue
2706
2707 if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
2708 logger.debug(2, 'Normal stamp current for task %s', tid)
2709 stamppresent.append(tid)
2710 sqrq.sq_task_skip(tid)
2711 continue
2712
2713 tocheck.add(tid)
2714
2715 valid = rq.validate_hashes(tocheck, cooker.data, len(stamppresent), False)
2716
2717 valid_new = stamppresent
2718 for v in valid:
2719 valid_new.append(v)
2720
2721 hashes = {}
2722 for mc in sorted(multiconfigs):
Brad Bishop08902b02019-08-20 09:16:51 -04002723 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002724 if mc_from_tid(tid) != mc:
2725 continue
2726 if tid not in valid_new and tid not in noexec and tid not in sqrq.scenequeue_notcovered:
2727 sqdata.outrightfail.add(tid)
2728
2729 h = pending_hash_index(tid, rqdata)
2730 if h not in hashes:
2731 hashes[h] = tid
2732 else:
2733 sqrq.sq_deferred[tid] = hashes[h]
2734 bb.warn("Deferring %s after %s" % (tid, hashes[h]))
2735
2736
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # Stored directly on Exception.args rather than via
        # Exception.__init__ so the payload is carried through unmodified.
        # NOTE(review): x is presumably a sequence of failed task ids —
        # confirm against the raise sites before relying on its shape.
        self.args = x
2743
2744
class runQueueExitWait(bb.event.Event):
    """
    Event fired while the runqueue is waiting for task processes to exit
    """

    def __init__(self, remain):
        # Number of still-active task processes we are waiting on.
        self.remain = remain
        # Human-readable progress message for UIs.
        self.message = "Waiting for %s active tasks to finish" % remain
        super().__init__()
2754
class runQueueEvent(bb.event.Event):
    """
    Base class for all runqueue task events
    """
    def __init__(self, task, stats, rq):
        # The task id ("<fn>:<taskname>") plus its split-out components.
        self.taskid = task
        self.taskstring = task
        self.taskfile = fn_from_tid(task)
        self.taskname = taskname_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the queue statistics at the moment the event is raised.
        self.stats = stats.copy()
        super().__init__()
2767
class sceneQueueEvent(runQueueEvent):
    """
    Base class for setscene (scenequeue) task events
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # Re-derive the display fields so UIs show the "_setscene" variant
        # of the task rather than the plain name set by the base class.
        self.taskfile = fn_from_tid(task)
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskstring = task + "_setscene"
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002778
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying that a task has been started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task body will not actually be executed.
        self.noexec = noexec
2786
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying that a setscene task has been started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task body will not actually be executed.
        self.noexec = noexec
2794
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying that a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        msg = "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
        return msg
2805
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002806class sceneQueueTaskFailed(sceneQueueEvent):
2807 """
2808 Event notifying a setscene task failed
2809 """
2810 def __init__(self, task, stats, exitcode, rq):
2811 sceneQueueEvent.__init__(self, task, stats, rq)
2812 self.exitcode = exitcode
2813
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002814 def __str__(self):
2815 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2816
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002817class sceneQueueComplete(sceneQueueEvent):
2818 """
2819 Event when all the sceneQueue tasks are complete
2820 """
2821 def __init__(self, stats, rq):
2822 self.stats = stats.copy()
2823 bb.event.Event.__init__(self)
2824
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed successfully; carries only the
    fields set by runQueueEvent.
    """
2829
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed successfully; carries only
    the fields set by sceneQueueEvent.
    """
2834
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying that a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        super().__init__(task, stats, rq)
        # Short description of why the task was skipped.
        self.reason = reason
2842
class taskUniHashUpdate(bb.event.Event):
    """
    Event carrying an updated unified hash for a task; consumed by
    runQueuePipe.read() to feed the executor's updated_taskhash_queue.
    """
    def __init__(self, task, unihash):
        self.unihash = unihash
        self.taskid = task
        super().__init__()
2851
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server.

    The worker writes pickled payloads framed as ``<event>...</event>`` and
    ``<exitcode>...</exitcode>``; read() drains the pipe non-blockingly and
    dispatches each complete frame.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # Read end of the pipe; the unused write end is closed immediately.
        self.input = pipein
        if pipeout:
            pipeout.close()
        # Non-blocking so read() can poll without stalling the server.
        bb.utils.nonblockingfd(self.input)
        # Buffer of bytes received so far but not yet parsed into frames.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Allows the executor to be swapped (e.g. scenequeue -> runqueue).
        self.rqexec = rqexec

    def read(self):
        """Drain pending data and dispatch complete frames.

        Returns True if any new bytes were received on this call.
        """
        # First, detect workers that died unexpectedly and trigger shutdown.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            # Non-blocking read; None/b"" simply means nothing available yet.
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN is expected on an empty non-blocking fd; anything else
            # is a real error.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep scanning until a full pass finds no complete frame.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # Payload sits between the 7-byte "<event>" prefix and
                    # the closing tag.
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                # Unihash updates are additionally queued for the executor.
                if isinstance(event, taskUniHashUpdate):
                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
                found = True
                # Skip past the 8-byte "</event>" terminator.
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    # Payload follows the 10-byte "<exitcode>" prefix.
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # Skip past the 11-byte "</exitcode>" terminator.
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Flush any remaining frames before closing the fd.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002917
def get_setscene_enforce_whitelist(d):
    """
    Return the list of "pn:task" patterns exempt from setscene enforcement,
    or None when BB_SETSCENE_ENFORCE is not enabled.

    Entries of the form "%:task" are expanded to one entry per target named
    on the command line.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    raw = d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or ""
    result = []
    for entry in raw.split():
        if not entry.startswith('%:'):
            result.append(entry)
            continue
        # '%' means "any command-line target": pair the entry's task part
        # with each non-option argument's recipe name.
        taskpart = entry.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                result.append(target.split(':')[0] + ':' + taskpart)
    return result
2931
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname is allowed to run as a real task under
    setscene enforcement. A whitelist of None means enforcement is off
    (everything allowed); otherwise the task must fnmatch an entry.
    """
    import fnmatch
    if whitelist is None:
        # Enforcement disabled - everything is permitted.
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)