"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#
12import copy
13import os
14import sys
15import signal
16import stat
17import fcntl
18import errno
19import logging
20import re
21import bb
22from bb import msg, data, event
23from bb import monitordisk
24import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060025import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050026from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040027import shlex
Brad Bishop96ff1982019-08-19 13:50:42 -040028import pprint
Patrick Williamsc124f4f2015-09-15 14:41:29 -050029
# Module loggers: "BitBake" is the shared parent logger, "BitBake.RunQueue"
# is scoped to this module.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a bare SHA-256 hex digest: exactly 64 hex characters that are not
# embedded inside a longer alphanumeric run (case-insensitive).
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050034
def fn_from_tid(tid):
    """Return the filename part of a task id ("fn:taskname")."""
    pieces = tid.rsplit(":", 1)
    return pieces[0]
37
def taskname_from_tid(tid):
    """Return the task name part of a task id ("fn:taskname")."""
    pieces = tid.rsplit(":", 1)
    return pieces[1]
40
def mc_from_tid(tid):
    """Return the multiconfig name embedded in *tid*, or "" for the default config."""
    if not tid.startswith('mc:'):
        return ""
    return tid.split(':')[1]
45
def split_tid(tid):
    """Return (mc, fn, taskname) for *tid*, dropping the mcfn element."""
    return split_tid_mcfn(tid)[:3]
49
def split_tid_mcfn(tid):
    """Split *tid* into its (mc, fn, taskname, mcfn) components.

    A multiconfig tid has the form "mc:<mc>:<fn>:<taskname>"; a plain tid
    is "<fn>:<taskname>".  mcfn is fn with the "mc:<mc>:" prefix retained
    (or just fn when there is no multiconfig).
    """
    if tid.startswith('mc:'):
        pieces = tid.split(':')
        mc = pieces[1]
        taskname = pieces[-1]
        # fn itself may contain colons (e.g. "virtual:native:...").
        fn = ":".join(pieces[2:-1])
        mcfn = "mc:" + mc + ":" + fn
    else:
        chunk = tid.rsplit(":", 1)
        mc = ""
        fn = chunk[0]
        taskname = chunk[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)
65
def build_tid(mc, fn, taskname):
    """Assemble a task id from its components (inverse of split_tid_mcfn)."""
    base = fn + ":" + taskname
    if mc:
        return "mc:" + mc + ":" + base
    return base
70
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """Return the pairing key for *tid*: "PN:taskname<unihash>".

    Two tasks from different multiconfigs are candidates for deduplication
    when they yield the same index string.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].unihash
    # Fix: the original appended the literal string "taskname" rather than
    # the task's actual name, so the task name never took part in the match,
    # contradicting the comment above.
    return pn + ":" + taskname + h
78
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow duplicate carrying the same counter values."""
        clone = self.__class__(self.total)
        clone.__dict__.update(self.__dict__)
        return clone

    def taskFailed(self):
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        # A skipped task still counts as active here; callers follow up with
        # a completion/failure notification that decrements it again.
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        self.active += 1
# These values indicate the next step due to be run in the
# runQueue state machine
# NOTE(review): the numbering has gaps (4, 5) — presumably states removed
# over time; the values are opaque identifiers, only compared for equality.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: the original wrapped the keys view in a one-element list
        # ([dict_keys(...)]), which broke prio_map.index() in
        # next_buildable_task() and enumerate() in dump_prio().
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                # Fix: self.buildable is a set; the original called
                # append(), which would raise AttributeError (the sibling
                # newbuildable() already uses add()).
                self.buildable.add(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Work on a copy: tasks already running, held off, or not yet
        # covered/notcovered by setscene are not candidates.
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.runq_running)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            # Fast path: only one candidate, no need to consult priorities.
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        # Lazily build the tid -> priority lookup from prio_map.
        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the buildable task with the lowest priority number that is
        # not thread-limited and whose stamp isn't already being built.
        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Mark *task* as buildable."""
        self.buildable.add(task)
        # Once tasks are running we don't need to worry about them again
        self.buildable.difference_update(self.rq.runq_running)

    def removebuildable(self, task):
        """Remove *task* from the buildable set."""
        self.buildable.remove(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of *taskid* for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Log the current priority ordering at debug level 3."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
227
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket task ids by their computed weight.
        by_weight = {}
        for tid in self.rqdata.runtaskentries:
            by_weight.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # Flatten in ascending weight order, then reverse so the heaviest
        # tasks come first in the priority map.
        self.prio_map = [tid for w in sorted(by_weight) for tid in by_weight[w]]
        self.prio_map.reverse()
254
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the after.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        # Start from the speed scheduler's weight-sorted prio_map.
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceeding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more imporant (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            # Stable-partition prio_map: pull every tid of this task kind
            # forward to task_index, preserving relative recipe order.
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500351
class RunTaskEntry(object):
    """Per-task record held by RunQueueData for each tid."""
    def __init__(self):
        # Dependency edges, as sets of tids.
        self.depends = set()
        self.revdeps = set()
        # Signature data, filled in once hashes are computed.
        self.hash = None
        self.unihash = None
        self.task = None
        # Scheduling weight; recalculated later, defaults to 1.
        self.weight = 1
360
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500361class RunQueueData:
362 """
363 BitBake Run Queue implementation
364 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        # Owning RunQueue plus the cooker/cache/taskdata state it was built from.
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        # Whether a multiple-providers warning has been issued already.
        self.warn_multi_bb = False

        # Configuration knobs read once up front.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # Placeholder reporter; replaced by the caller when real progress
        # reporting is wanted.
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
381
    def reset(self):
        # Discard any previously computed per-task entries (tid -> RunTaskEntry).
        self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500384
385 def runq_depends_names(self, ids):
386 import re
387 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600388 for id in ids:
389 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500390 nam = re.sub("_[^,]*,", ",", nam)
391 ret.extend([nam])
392 return ret
393
    def get_task_hash(self, tid):
        """Return the taskhash recorded for *tid*."""
        return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500396
    def get_task_unihash(self, tid):
        """Return the unified (equivalence) hash recorded for *tid*."""
        return self.runtaskentries[tid].unihash
399
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600400 def get_user_idstring(self, tid, task_name_suffix = ""):
401 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500402
    def get_short_user_idstring(self, task, task_name_suffix = ""):
        """Return a short "PN:taskname" identifier for *task*."""
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)
408
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        # Raised internally to abandon the (potentially expensive) search
        # once enough loops have been reported.
        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies; a revdep already on
            # prev_chain means we closed a cycle.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    # Re-scan if any task on the current chain appears in the
                    # already-explored deps of this revdep.
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
501
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        Returns the tid -> weight mapping; also stores each weight on the
        corresponding RunTaskEntry.  Aborts the build via bb.msg.fatal if
        unbuildable (cyclic) tasks are found.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Endpoints (tasks nothing depends on) seed the walk with weight 10.
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Propagate weights backwards through the dependency graph, one
        # "layer" at a time: a task becomes ready once all its reverse
        # dependencies have contributed their weight.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
561
562 def prepare(self):
563 """
564 Turn a set of taskData into a RunQueue and compute data needed
565 to optimise the execution order.
566 """
567
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500569 recursivetasks = {}
570 recursiveitasks = {}
571 recursivetasksselfref = set()
572
573 taskData = self.taskData
574
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600575 found = False
576 for mc in self.taskData:
577 if len(taskData[mc].taskentries) > 0:
578 found = True
579 break
580 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500581 # Nothing to do
582 return 0
583
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600584 self.init_progress_reporter.start()
585 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500586
587 # Step A - Work out a list of tasks to run
588 #
589 # Taskdata gives us a list of possible providers for every build and run
590 # target ordered by priority. It also gives information on each of those
591 # providers.
592 #
593 # To create the actual list of tasks to execute we fix the list of
594 # providers and then resolve the dependencies into task IDs. This
595 # process is repeated for each type of dependency (tdepends, deptask,
596 # rdeptast, recrdeptask, idepends).
597
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600598 def add_build_dependencies(depids, tasknames, depends, mc):
599 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500600 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600601 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500602 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600603 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500604 if depdata is None:
605 continue
606 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600607 t = depdata + ":" + taskname
608 if t in taskData[mc].taskentries:
609 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500610
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600611 def add_runtime_dependencies(depids, tasknames, depends, mc):
612 for depname in depids:
613 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600615 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500616 if depdata is None:
617 continue
618 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600619 t = depdata + ":" + taskname
620 if t in taskData[mc].taskentries:
621 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500622
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800623 def add_mc_dependencies(mc, tid):
624 mcdeps = taskData[mc].get_mcdepends()
625 for dep in mcdeps:
626 mcdependency = dep.split(':')
627 pn = mcdependency[3]
628 frommc = mcdependency[1]
629 mcdep = mcdependency[2]
630 deptask = mcdependency[4]
631 if mc == frommc:
632 fn = taskData[mcdep].build_targets[pn][0]
633 newdep = '%s:%s' % (fn,deptask)
634 taskData[mc].taskentries[tid].tdepends.append(newdep)
635
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600636 for mc in taskData:
637 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500638
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600639 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
640 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500641
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600642 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
643
644 depends = set()
645 task_deps = self.dataCaches[mc].task_deps[taskfn]
646
647 self.runtaskentries[tid] = RunTaskEntry()
648
649 if fn in taskData[mc].failed_fns:
650 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500651
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800652 # We add multiconfig dependencies before processing internal task deps (tdepends)
653 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
654 add_mc_dependencies(mc, tid)
655
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500656 # Resolve task internal dependencies
657 #
658 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600659 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800660 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
661 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500662
663 # Resolve 'deptask' dependencies
664 #
665 # e.g. do_sometask[deptask] = "do_someothertask"
666 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600667 if 'deptask' in task_deps and taskname in task_deps['deptask']:
668 tasknames = task_deps['deptask'][taskname].split()
669 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500670
671 # Resolve 'rdeptask' dependencies
672 #
673 # e.g. do_sometask[rdeptask] = "do_someothertask"
674 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600675 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
676 tasknames = task_deps['rdeptask'][taskname].split()
677 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500678
679 # Resolve inter-task dependencies
680 #
681 # e.g. do_sometask[depends] = "targetname:do_someothertask"
682 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600683 idepends = taskData[mc].taskentries[tid].idepends
684 for (depname, idependtask) in idepends:
685 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500686 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600687 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500688 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600689 t = depdata + ":" + idependtask
690 depends.add(t)
691 if t not in taskData[mc].taskentries:
692 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
693 irdepends = taskData[mc].taskentries[tid].irdepends
694 for (depname, idependtask) in irdepends:
695 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500696 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500697 if not taskData[mc].run_targets[depname]:
698 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600699 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500700 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600701 t = depdata + ":" + idependtask
702 depends.add(t)
703 if t not in taskData[mc].taskentries:
704 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500705
706 # Resolve recursive 'recrdeptask' dependencies (Part A)
707 #
708 # e.g. do_sometask[recrdeptask] = "do_someothertask"
709 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
710 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600711 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
712 tasknames = task_deps['recrdeptask'][taskname].split()
713 recursivetasks[tid] = tasknames
714 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
715 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
716 if taskname in tasknames:
717 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500718
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600719 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
720 recursiveitasks[tid] = []
721 for t in task_deps['recideptask'][taskname].split():
722 newdep = build_tid(mc, fn, t)
723 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500724
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600725 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400726 # Remove all self references
727 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500728
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600729 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500730
Brad Bishop316dfdd2018-06-25 12:45:53 -0400731 self.init_progress_reporter.next_stage()
732
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500733 # Resolve recursive 'recrdeptask' dependencies (Part B)
734 #
735 # e.g. do_sometask[recrdeptask] = "do_someothertask"
736 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600737 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600738
Brad Bishop316dfdd2018-06-25 12:45:53 -0400739 # Generating/iterating recursive lists of dependencies is painful and potentially slow
740 # Precompute recursive task dependencies here by:
741 # a) create a temp list of reverse dependencies (revdeps)
742 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
743 # c) combine the total list of dependencies in cumulativedeps
744 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500745
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500746
Brad Bishop316dfdd2018-06-25 12:45:53 -0400747 revdeps = {}
748 deps = {}
749 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600750 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400751 deps[tid] = set(self.runtaskentries[tid].depends)
752 revdeps[tid] = set()
753 cumulativedeps[tid] = set()
754 # Generate a temp list of reverse dependencies
755 for tid in self.runtaskentries:
756 for dep in self.runtaskentries[tid].depends:
757 revdeps[dep].add(tid)
758 # Find the dependency chain endpoints
759 endpoints = set()
760 for tid in self.runtaskentries:
761 if len(deps[tid]) == 0:
762 endpoints.add(tid)
763 # Iterate the chains collating dependencies
764 while endpoints:
765 next = set()
766 for tid in endpoints:
767 for dep in revdeps[tid]:
768 cumulativedeps[dep].add(fn_from_tid(tid))
769 cumulativedeps[dep].update(cumulativedeps[tid])
770 if tid in deps[dep]:
771 deps[dep].remove(tid)
772 if len(deps[dep]) == 0:
773 next.add(dep)
774 endpoints = next
775 #for tid in deps:
776 # if len(deps[tid]) != 0:
777 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
778
779 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
780 # resolve these recursively until we aren't adding any further extra dependencies
781 extradeps = True
782 while extradeps:
783 extradeps = 0
784 for tid in recursivetasks:
785 tasknames = recursivetasks[tid]
786
787 totaldeps = set(self.runtaskentries[tid].depends)
788 if tid in recursiveitasks:
789 totaldeps.update(recursiveitasks[tid])
790 for dep in recursiveitasks[tid]:
791 if dep not in self.runtaskentries:
792 continue
793 totaldeps.update(self.runtaskentries[dep].depends)
794
795 deps = set()
796 for dep in totaldeps:
797 if dep in cumulativedeps:
798 deps.update(cumulativedeps[dep])
799
800 for t in deps:
801 for taskname in tasknames:
802 newtid = t + ":" + taskname
803 if newtid == tid:
804 continue
805 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
806 extradeps += 1
807 self.runtaskentries[tid].depends.add(newtid)
808
809 # Handle recursive tasks which depend upon other recursive tasks
810 deps = set()
811 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
812 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
813 for newtid in deps:
814 for taskname in tasknames:
815 if not newtid.endswith(":" + taskname):
816 continue
817 if newtid in self.runtaskentries:
818 extradeps += 1
819 self.runtaskentries[tid].depends.add(newtid)
820
821 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
822
823 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
824 for tid in recursivetasksselfref:
825 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600826
827 self.init_progress_reporter.next_stage()
828
829 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500830
831 # Step B - Mark all active tasks
832 #
833 # Start with the tasks we were asked to run and mark all dependencies
834 # as active too. If the task is to be 'forced', clear its stamp. Once
835 # all active tasks are marked, prune the ones we don't need.
836
837 logger.verbose("Marking Active Tasks")
838
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600839 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500840 """
841 Mark an item as active along with its depends
842 (calls itself recursively)
843 """
844
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600845 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500846 return
847
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600848 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600850 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500851 for depend in depends:
852 mark_active(depend, depth+1)
853
Brad Bishop79641f22019-09-10 07:20:22 -0400854 def invalidate_task(tid, error_nostamp):
855 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
856 taskdep = self.dataCaches[mc].task_deps[taskfn]
857 if fn + ":" + taskname not in taskData[mc].taskentries:
858 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
859 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
860 if error_nostamp:
861 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
862 else:
863 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
864 else:
865 logger.verbose("Invalidate task %s, %s", taskname, fn)
866 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
867
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600868 self.target_tids = []
869 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500870
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600871 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500872 continue
873
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600874 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500875 continue
876
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500877 parents = False
878 if task.endswith('-'):
879 parents = True
880 task = task[:-1]
881
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600882 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500883 continue
884
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600885 # fn already has mc prefix
886 tid = fn + ":" + task
887 self.target_tids.append(tid)
888 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500889 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600890 tasks = []
891 for x in taskData[mc].taskentries:
892 if x.startswith(fn + ":"):
893 tasks.append(taskname_from_tid(x))
894 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500895 if close_matches:
896 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
897 else:
898 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600899 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
900
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500901 # For tasks called "XXXX-", only run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500902 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600903 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500904 mark_active(i, 1)
905 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600906 mark_active(tid, 1)
907
908 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500909
910 # Step C - Prune all inactive tasks
911 #
912 # Once all active tasks are marked, prune the ones we don't need.
913
Brad Bishop316dfdd2018-06-25 12:45:53 -0400914 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600915 for tid in list(self.runtaskentries.keys()):
916 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400917 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600918 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600919
Brad Bishop316dfdd2018-06-25 12:45:53 -0400920 # Handle --runall
921 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500922 # re-run the mark_active and then drop unused tasks from new list
923 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400924
925 for task in self.cooker.configuration.runall:
926 runall_tids = set()
927 for tid in list(self.runtaskentries):
928 wanttid = fn_from_tid(tid) + ":do_%s" % task
929 if wanttid in delcount:
930 self.runtaskentries[wanttid] = delcount[wanttid]
931 if wanttid in self.runtaskentries:
932 runall_tids.add(wanttid)
933
934 for tid in list(runall_tids):
935 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400936 if self.cooker.configuration.force:
937 invalidate_task(tid, False)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500938
939 for tid in list(self.runtaskentries.keys()):
940 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400941 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500942 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500943
944 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400945 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
946
947 self.init_progress_reporter.next_stage()
948
949 # Handle runonly
950 if self.cooker.configuration.runonly:
951 # re-run the mark_active and then drop unused tasks from new list
952 runq_build = {}
953
954 for task in self.cooker.configuration.runonly:
955 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
956
957 for tid in list(runonly_tids):
958 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400959 if self.cooker.configuration.force:
960 invalidate_task(tid, False)
Brad Bishop316dfdd2018-06-25 12:45:53 -0400961
962 for tid in list(self.runtaskentries.keys()):
963 if tid not in runq_build:
964 delcount[tid] = self.runtaskentries[tid]
965 del self.runtaskentries[tid]
966
967 if len(self.runtaskentries) == 0:
968 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500969
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500970 #
971 # Step D - Sanity checks and computation
972 #
973
974 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600975 if len(self.runtaskentries) == 0:
976 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500977 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
978 else:
979 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
980
Brad Bishop316dfdd2018-06-25 12:45:53 -0400981 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500982
983 logger.verbose("Assign Weightings")
984
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600985 self.init_progress_reporter.next_stage()
986
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500987 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600988 for tid in self.runtaskentries:
989 for dep in self.runtaskentries[tid].depends:
990 self.runtaskentries[dep].revdeps.add(tid)
991
992 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500993
994 # Identify tasks at the end of dependency chains
995 # Error on circular dependency loops (length two)
996 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600997 for tid in self.runtaskentries:
998 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500999 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001000 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001001 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001002 if dep in self.runtaskentries[tid].depends:
1003 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
1004
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001005
1006 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
1007
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001008 self.init_progress_reporter.next_stage()
1009
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001010 # Calculate task weights
1011 # Check of higher length circular dependencies
1012 self.runq_weight = self.calculate_task_weights(endpoints)
1013
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001014 self.init_progress_reporter.next_stage()
1015
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001016 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001017 for mc in self.dataCaches:
1018 prov_list = {}
1019 seen_fn = []
1020 for tid in self.runtaskentries:
1021 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1022 if taskfn in seen_fn:
1023 continue
1024 if mc != tidmc:
1025 continue
1026 seen_fn.append(taskfn)
1027 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1028 if prov not in prov_list:
1029 prov_list[prov] = [taskfn]
1030 elif taskfn not in prov_list[prov]:
1031 prov_list[prov].append(taskfn)
1032 for prov in prov_list:
1033 if len(prov_list[prov]) < 2:
1034 continue
1035 if prov in self.multi_provider_whitelist:
1036 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001037 seen_pn = []
1038 # If two versions of the same PN are being built its fatal, we don't support it.
1039 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001040 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001041 if pn not in seen_pn:
1042 seen_pn.append(pn)
1043 else:
1044 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001045 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1046 #
1047 # Construct a list of things which uniquely depend on each provider
1048 # since this may help the user figure out which dependency is triggering this warning
1049 #
1050 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1051 deplist = {}
1052 commondeps = None
1053 for provfn in prov_list[prov]:
1054 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001055 for tid in self.runtaskentries:
1056 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001057 if fn != provfn:
1058 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001059 for dep in self.runtaskentries[tid].revdeps:
1060 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001061 if fn == provfn:
1062 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001063 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001064 if not commondeps:
1065 commondeps = set(deps)
1066 else:
1067 commondeps &= deps
1068 deplist[provfn] = deps
1069 for provfn in deplist:
1070 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1071 #
1072 # Construct a list of provides and runtime providers for each recipe
1073 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1074 #
1075 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1076 provide_results = {}
1077 rprovide_results = {}
1078 commonprovs = None
1079 commonrprovs = None
1080 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001081 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001082 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001083 for rprovide in self.dataCaches[mc].rproviders:
1084 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001085 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001086 for package in self.dataCaches[mc].packages:
1087 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001088 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001089 for package in self.dataCaches[mc].packages_dynamic:
1090 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001091 rprovides.add(package)
1092 if not commonprovs:
1093 commonprovs = set(provides)
1094 else:
1095 commonprovs &= provides
1096 provide_results[provfn] = provides
1097 if not commonrprovs:
1098 commonrprovs = set(rprovides)
1099 else:
1100 commonrprovs &= rprovides
1101 rprovide_results[provfn] = rprovides
1102 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1103 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1104 for provfn in prov_list[prov]:
1105 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1106 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1107
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001108 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001109 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001110 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001111 logger.error(msg)
1112
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001113 self.init_progress_reporter.next_stage()
1114
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001115 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001116 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001117 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001118 self.stampfnwhitelist[mc] = []
1119 for entry in self.stampwhitelist.split():
1120 if entry not in self.taskData[mc].build_targets:
1121 continue
1122 fn = self.taskData.build_targets[entry][0]
1123 self.stampfnwhitelist[mc].append(fn)
1124
1125 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001126
1127 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001128 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001129 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001130 for tid in self.runtaskentries:
1131 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001132 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001133 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001134 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001136
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001137 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001138
1139 # Invalidate task if force mode active
1140 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001141 for tid in self.target_tids:
1142 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001143
1144 # Invalidate task if invalidate mode active
1145 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001146 for tid in self.target_tids:
1147 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001148 for st in self.cooker.configuration.invalidate_stamp.split(','):
1149 if not st.startswith("do_"):
1150 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001151 invalidate_task(fn + ":" + st, True)
1152
1153 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001154
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001155 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001156 for mc in taskData:
1157 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1158 virtpnmap = {}
1159 for v in virtmap:
1160 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1161 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1162 if hasattr(bb.parse.siggen, "tasks_resolved"):
1163 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1164
1165 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001166
Brad Bishop00e122a2019-10-05 11:10:57 -04001167 bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
1168
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001169 # Iterate over the task list and call into the siggen code
1170 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001171 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001172 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001173 for tid in todeal.copy():
1174 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1175 dealtwith.add(tid)
1176 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001177 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001178
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001179 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001180
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001181 #self.dump_data()
1182 return len(self.runtaskentries)
1183
Brad Bishop19323692019-04-05 15:28:33 -04001184 def prepare_task_hash(self, tid):
1185 procdep = []
1186 for dep in self.runtaskentries[tid].depends:
Brad Bishop08902b02019-08-20 09:16:51 -04001187 procdep.append(dep)
1188 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.dataCaches[mc_from_tid(tid)])
1189 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001190
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001191 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001192 """
1193 Dump some debug information on the internal data structures
1194 """
1195 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001196 for tid in self.runtaskentries:
1197 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1198 self.runtaskentries[tid].weight,
1199 self.runtaskentries[tid].depends,
1200 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001201
class RunQueueWorker:
    """Pairs a bitbake-worker subprocess with the pipe its output is read from."""
    def __init__(self, process, pipe):
        # Handle to the spawned bitbake-worker subprocess.
        self.process = process
        # runQueuePipe wrapping the worker's stdout (see _start_worker).
        self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001206
1207class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001208 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001209
1210 self.cooker = cooker
1211 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001212 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001213
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001214 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1215 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001216 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001217
1218 self.state = runQueuePrepare
1219
1220 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001221 # Invoked at regular time intervals via the bitbake heartbeat event
1222 # while the build is running. We generate a unique name for the handler
1223 # here, just in case that there ever is more than one RunQueue instance,
Brad Bishop96ff1982019-08-19 13:50:42 -04001224 # start the handler when reaching runQueueSceneInit, and stop it when
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001225 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001226 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001227 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1228 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001229 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001230 self.worker = {}
1231 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001232
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001233 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001234 logger.debug(1, "Starting bitbake-worker")
1235 magic = "decafbad"
1236 if self.cooker.configuration.profile:
1237 magic = "decafbadbad"
1238 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001239 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001240 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001241 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001242 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001243 env = os.environ.copy()
1244 for key, value in (var.split('=') for var in fakerootenv):
1245 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001246 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001247 else:
1248 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1249 bb.utils.nonblockingfd(worker.stdout)
1250 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1251
1252 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001253 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1254 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1255 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1256 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001257 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001258 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1259 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1260 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1261 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1262 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001263 "buildname" : self.cfgData.getVar("BUILDNAME"),
1264 "date" : self.cfgData.getVar("DATE"),
1265 "time" : self.cfgData.getVar("TIME"),
Brad Bishopa34c0302019-09-23 22:34:48 -04001266 "hashservaddr" : self.cooker.hashservaddr,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001267 }
1268
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001269 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001270 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001271 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001272 worker.stdin.flush()
1273
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001274 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001275
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001276 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001277 if not worker:
1278 return
1279 logger.debug(1, "Teardown for bitbake-worker")
1280 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001281 worker.process.stdin.write(b"<quit></quit>")
1282 worker.process.stdin.flush()
1283 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001284 except IOError:
1285 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001286 while worker.process.returncode is None:
1287 worker.pipe.read()
1288 worker.process.poll()
1289 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001290 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001291 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001292
1293 def start_worker(self):
1294 if self.worker:
1295 self.teardown_workers()
1296 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001297 for mc in self.rqdata.dataCaches:
1298 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001299
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001300 def start_fakeworker(self, rqexec, mc):
1301 if not mc in self.fakeworker:
1302 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001303
1304 def teardown_workers(self):
1305 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001306 for mc in self.worker:
1307 self._teardown_worker(self.worker[mc])
1308 self.worker = {}
1309 for mc in self.fakeworker:
1310 self._teardown_worker(self.fakeworker[mc])
1311 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001312
1313 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001314 for mc in self.worker:
1315 self.worker[mc].pipe.read()
1316 for mc in self.fakeworker:
1317 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001318
1319 def active_fds(self):
1320 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001321 for mc in self.worker:
1322 fds.append(self.worker[mc].pipe.input)
1323 for mc in self.fakeworker:
1324 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001325 return fds
1326
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001327 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001328 def get_timestamp(f):
1329 try:
1330 if not os.access(f, os.F_OK):
1331 return None
1332 return os.stat(f)[stat.ST_MTIME]
1333 except:
1334 return None
1335
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001336 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1337 if taskname is None:
1338 taskname = tn
1339
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001340 if self.stamppolicy == "perfile":
1341 fulldeptree = False
1342 else:
1343 fulldeptree = True
1344 stampwhitelist = []
1345 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001346 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001347
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001348 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001349
1350 # If the stamp is missing, it's not current
1351 if not os.access(stampfile, os.F_OK):
1352 logger.debug(2, "Stampfile %s not available", stampfile)
1353 return False
1354 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001355 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001356 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1357 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1358 return False
1359
1360 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1361 return True
1362
1363 if cache is None:
1364 cache = {}
1365
1366 iscurrent = True
1367 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001368 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001369 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001370 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1371 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1372 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001373 t2 = get_timestamp(stampfile2)
1374 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001375 if t3 and not t2:
1376 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001377 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001378 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001379 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1380 if not t2:
1381 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1382 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001383 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001384 if t1 < t2:
1385 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1386 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001387 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001388 if recurse and iscurrent:
1389 if dep in cache:
1390 iscurrent = cache[dep]
1391 if not iscurrent:
1392 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1393 else:
1394 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1395 cache[dep] = iscurrent
1396 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001397 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001398 return iscurrent
1399
Brad Bishopa34c0302019-09-23 22:34:48 -04001400 def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False):
Brad Bishop96ff1982019-08-19 13:50:42 -04001401 valid = set()
1402 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001403 sq_data = {}
1404 sq_data['hash'] = {}
1405 sq_data['hashfn'] = {}
1406 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001407 for tid in tocheck:
1408 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001409 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1410 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1411 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001412
Brad Bishop08902b02019-08-20 09:16:51 -04001413 valid = self.validate_hash(sq_data, data, siginfo, currentcount)
Brad Bishop96ff1982019-08-19 13:50:42 -04001414
1415 return valid
1416
Brad Bishop08902b02019-08-20 09:16:51 -04001417 def validate_hash(self, sq_data, d, siginfo, currentcount):
1418 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
Brad Bishop19323692019-04-05 15:28:33 -04001419
Brad Bishop08902b02019-08-20 09:16:51 -04001420 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
1421 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
Brad Bishop19323692019-04-05 15:28:33 -04001422
Brad Bishop19323692019-04-05 15:28:33 -04001423 return bb.utils.better_eval(call, locs)
1424
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        Implemented as a state machine driven by repeated calls: each call
        advances self.state and returns a truthy value to be called again,
        or False once everything is complete.
        """

        retval = True

        if self.state is runQueuePrepare:
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            if self.rqdata.prepare() == 0:
                # Nothing to run at all.
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                bb.parse.siggen.save_unitaskhashes()

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()

            # we are ready to run, emit dependency info to any UI or class which
            # needs it
            depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
            self.rqdata.init_progress_reporter.next_stage()
            bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

            # Hook the disk monitor onto heartbeat events; the lambda only
            # performs checks while tasks are actually executing.
            if not self.dm_event_handler_registered:
                res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                # Dumping signatures replaces execution entirely.
                self.state = runQueueComplete

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()
            self.start_worker()
            self.rqdata.init_progress_reporter.next_stage()
            self.rqexe = RunQueueExecute(self)

            # If we don't have any setscene functions, skip execution
            if len(self.rqdata.runq_setscene_tids) == 0:
                logger.info('No setscene tasks')
                for tid in self.rqdata.runtaskentries:
                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
                        self.rqexe.setbuildable(tid)
                    self.rqexe.tasks_notcovered.add(tid)
                self.rqexe.sqdone = True
            logger.info('Executing Tasks')
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        # Once the build settles, unhook the disk monitor and tear down workers.
        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            bb.parse.siggen.save_unitaskhashes()
            self.teardown_workers()
            if self.rqexe:
                if self.rqexe.stats.failed:
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
                else:
                    # Let's avoid the word "failed" if nothing actually did
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1523
1524 def execute_runqueue(self):
1525 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1526 try:
1527 return self._execute_runqueue()
1528 except bb.runqueue.TaskFailure:
1529 raise
1530 except SystemExit:
1531 raise
1532 except bb.BBHandledException:
1533 try:
1534 self.teardown_workers()
1535 except:
1536 pass
1537 self.state = runQueueComplete
1538 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001539 except Exception as err:
1540 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001541 try:
1542 self.teardown_workers()
1543 except:
1544 pass
1545 self.state = runQueueComplete
1546 raise
1547
1548 def finish_runqueue(self, now = False):
1549 if not self.rqexe:
1550 self.state = runQueueComplete
1551 return
1552
1553 if now:
1554 self.rqexe.finish_now()
1555 else:
1556 self.rqexe.finish()
1557
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001558 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001559 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001560 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1561 siggen = bb.parse.siggen
1562 dataCaches = self.rqdata.dataCaches
1563 siggen.dump_sigfn(fn, dataCaches, options)
1564
1565 def dump_signatures(self, options):
1566 fns = set()
1567 bb.note("Reparsing files to collect dependency data")
1568
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001569 for tid in self.rqdata.runtaskentries:
1570 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001571 fns.add(fn)
1572
1573 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1574 # We cannot use the real multiprocessing.Pool easily due to some local data
1575 # that can't be pickled. This is a cheap multi-process solution.
1576 launched = []
1577 while fns:
1578 if len(launched) < max_process:
1579 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1580 p.start()
1581 launched.append(p)
1582 for q in launched:
1583 # The finished processes are joined when calling is_alive()
1584 if not q.is_alive():
1585 launched.remove(q)
1586 for p in launched:
1587 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001588
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001589 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001590
1591 return
1592
1593 def print_diffscenetasks(self):
1594
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001595 noexec = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001596 tocheck = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001597
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001598 for tid in self.rqdata.runtaskentries:
1599 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1600 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001601
1602 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001603 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001604 continue
1605
Brad Bishop96ff1982019-08-19 13:50:42 -04001606 tocheck.add(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001607
Brad Bishopa34c0302019-09-23 22:34:48 -04001608 valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001609
1610 # Tasks which are both setscene and noexec never care about dependencies
1611 # We therefore find tasks which are setscene and noexec and mark their
1612 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001613 for tid in noexec:
1614 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001615 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001616 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001617 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001618 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1619 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001620 continue
1621 hasnoexecparents = False
1622 break
1623 if hasnoexecparents:
1624 valid_new.add(dep)
1625
1626 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001627 for tid in self.rqdata.runtaskentries:
1628 if tid not in valid_new and tid not in noexec:
1629 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001630
1631 found = set()
1632 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001633 for tid in invalidtasks:
1634 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001635 while toprocess:
1636 next = set()
1637 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001638 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001639 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001640 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001641 if dep not in processed:
1642 processed.add(dep)
1643 next.add(dep)
1644 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001645 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001646 toprocess = set()
1647
1648 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001649 for tid in invalidtasks.difference(found):
1650 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001651
1652 if tasklist:
1653 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1654
1655 return invalidtasks.difference(found)
1656
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid task, locate the closest previously written
        siginfo file and print a human-readable signature comparison."""

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Compare the sigdata for two hashes of 'key', recursing into
            # nested differences; returns indented output lines.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            # All siginfo files written for this task; the one containing our
            # current hash must exist since we just wrote it.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop our own hash, then compare against the newest remaining
            # (closest previous) siginfo file.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1691
Brad Bishop96ff1982019-08-19 13:50:42 -04001692
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001693class RunQueueExecute:
1694
    def __init__(self, rq):
        """Set up execution state for both the setscene ('sq_*') queue and
        the real run queue ('runq_*') driven by RunQueue 'rq'."""
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # User-configurable parallelism and scheduling policy.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene queue state: buildable, started and currently-running tids.
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        self.updated_taskhash_queue = []
        self.pending_migrations = set()

        # Real run queue state.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamps of tasks currently in flight (two views of the same data).
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []
        self.sq_deferred = {}

        self.stampcache = {}

        self.holdoff_tasks = set()
        self.holdoff_need_update = True
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Route worker pipe events back to this executor.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # We can't skip specified target tasks which aren't setscene tasks
        self.cantskip = set(self.rqdata.target_tids)
        self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
        self.cantskip.intersection_update(self.rqdata.runtaskentries)

        # Select the scheduler implementation named by BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        #if len(self.rqdata.runq_setscene_tids) > 0:
        self.sqdata = SQData()
        build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001764
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001765 def runqueue_process_waitpid(self, task, status):
1766
1767 # self.build_stamps[pid] may not exist when use shared work directory.
1768 if task in self.build_stamps:
1769 self.build_stamps2.remove(self.build_stamps[task])
1770 del self.build_stamps[task]
1771
Brad Bishop96ff1982019-08-19 13:50:42 -04001772 if task in self.sq_live:
1773 if status != 0:
1774 self.sq_task_fail(task, status)
1775 else:
1776 self.sq_task_complete(task)
1777 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001778 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001779 if status != 0:
1780 self.task_fail(task, status)
1781 else:
1782 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001783 return True
1784
1785 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001786 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001787 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001788 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1789 self.rq.worker[mc].process.stdin.flush()
1790 except IOError:
1791 # worker must have died?
1792 pass
1793 for mc in self.rq.fakeworker:
1794 try:
1795 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1796 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001797 except IOError:
1798 # worker must have died?
1799 pass
1800
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001801 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001802 self.rq.state = runQueueFailed
1803 return
1804
1805 self.rq.state = runQueueComplete
1806 return
1807
1808 def finish(self):
1809 self.rq.state = runQueueCleanUp
1810
Brad Bishop96ff1982019-08-19 13:50:42 -04001811 active = self.stats.active + self.sq_stats.active
1812 if active > 0:
1813 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001814 self.rq.read_workers()
1815 return self.rq.active_fds()
1816
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001817 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001818 self.rq.state = runQueueFailed
1819 return True
1820
1821 self.rq.state = runQueueComplete
1822 return True
1823
Brad Bishop96ff1982019-08-19 13:50:42 -04001824 # Used by setscene only
1825 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001826 if not self.rq.depvalidate:
1827 return False
1828
Brad Bishop08902b02019-08-20 09:16:51 -04001829 # Must not edit parent data
1830 taskdeps = set(taskdeps)
1831
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001832 taskdata = {}
1833 taskdeps.add(task)
1834 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001835 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1836 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001837 taskdata[dep] = [pn, taskname, fn]
1838 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001839 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001840 valid = bb.utils.better_eval(call, locs)
1841 return valid
1842
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001843 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001844 active = self.stats.active + self.sq_stats.active
1845 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001846 return can_start
1847
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001848 def get_schedulers(self):
1849 schedulers = set(obj for obj in globals().values()
1850 if type(obj) is type and
1851 issubclass(obj, RunQueueScheduler))
1852
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001853 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001854 if user_schedulers:
1855 for sched in user_schedulers.split():
1856 if not "." in sched:
1857 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1858 continue
1859
1860 modname, name = sched.rsplit(".", 1)
1861 try:
1862 module = __import__(modname, fromlist=(name,))
1863 except ImportError as exc:
1864 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1865 raise SystemExit(1)
1866 else:
1867 schedulers.add(getattr(module, name))
1868 return schedulers
1869
    def setbuildable(self, task):
        # Mark the task as ready to run and notify the scheduler so it can
        # update its ordering.
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001873
1874 def task_completeoutright(self, task):
1875 """
1876 Mark a task as completed
1877 Look at the reverse dependencies and mark any task with
1878 completed dependencies as buildable
1879 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001880 self.runq_complete.add(task)
1881 for revdep in self.rqdata.runtaskentries[task].revdeps:
1882 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001883 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001884 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001885 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001886 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001887 for dep in self.rqdata.runtaskentries[revdep].depends:
1888 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001889 alldeps = False
1890 break
1891 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001892 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001893 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001894
    def task_complete(self, task):
        # Record the success, notify listeners, then propagate completion to
        # reverse dependencies.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1899
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # If the abort-on-failure flag is set, stop scheduling new tasks and
        # move to cleanup.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1910
    def task_skip(self, task, reason):
        # A skipped task is still marked as running/buildable/complete so
        # that its reverse dependencies become buildable.
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001918
    def summarise_scenequeue_errors(self):
        """Sanity-check the final scenequeue bookkeeping and log any
        inconsistencies. Returns True if a problem was detected."""
        err = False
        if not self.sqdone:
            logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
            completeevent = sceneQueueComplete(self.sq_stats, self.rq)
            bb.event.fire(completeevent, self.cfgData)
        # Leftover queue state means the scenequeue never fully drained.
        if self.sq_deferred:
            logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
            err = True
        if self.updated_taskhash_queue:
            logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
            err = True
        if self.holdoff_tasks:
            logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
            err = True

        # Every setscene task must have passed through the full lifecycle.
        for tid in self.rqdata.runq_setscene_tids:
            if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
                err = True
                logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
            if tid not in self.sq_buildable:
                err = True
                logger.error("Setscene Task %s was never marked as buildable" % tid)
            if tid not in self.sq_running:
                err = True
                logger.error("Setscene Task %s was never marked as running" % tid)

        # Every runqueue task must have been classified by the setscene code.
        for x in self.rqdata.runtaskentries:
            if x not in self.tasks_covered and x not in self.tasks_notcovered:
                logger.error("Task %s was never moved from the setscene queue" % x)
                err = True
            if x not in self.tasks_scenequeue_done:
                logger.error("Task %s was never processed by the setscene code" % x)
                err = True
            if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
                logger.error("Task %s was never marked as buildable by the setscene code" % x)
                err = True
        return err
1957
1958
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001959 def execute(self):
1960 """
Brad Bishop96ff1982019-08-19 13:50:42 -04001961 Run the tasks in a queue prepared by prepare_runqueue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001962 """
1963
1964 self.rq.read_workers()
Brad Bishop08902b02019-08-20 09:16:51 -04001965 self.process_possible_migrations()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001966
Brad Bishop96ff1982019-08-19 13:50:42 -04001967 task = None
1968 if not self.sqdone and self.can_start_task():
1969 # Find the next setscene to run
Brad Bishop08902b02019-08-20 09:16:51 -04001970 for nexttask in sorted(self.rqdata.runq_setscene_tids):
Brad Bishop96ff1982019-08-19 13:50:42 -04001971 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
1972 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
1973 if nexttask not in self.rqdata.target_tids:
1974 logger.debug(2, "Skipping setscene for task %s" % nexttask)
1975 self.sq_task_skip(nexttask)
1976 self.scenequeue_notneeded.add(nexttask)
1977 if nexttask in self.sq_deferred:
1978 del self.sq_deferred[nexttask]
1979 return True
Brad Bishop08902b02019-08-20 09:16:51 -04001980 # If covered tasks are running, need to wait for them to complete
1981 for t in self.sqdata.sq_covered_tasks[nexttask]:
1982 if t in self.runq_running and t not in self.runq_complete:
1983 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04001984 if nexttask in self.sq_deferred:
1985 if self.sq_deferred[nexttask] not in self.runq_complete:
1986 continue
1987 logger.debug(1, "Task %s no longer deferred" % nexttask)
1988 del self.sq_deferred[nexttask]
Brad Bishopa34c0302019-09-23 22:34:48 -04001989 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False)
Brad Bishop96ff1982019-08-19 13:50:42 -04001990 if not valid:
1991 logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
1992 self.sq_task_failoutright(nexttask)
1993 return True
1994 else:
1995 self.sqdata.outrightfail.remove(nexttask)
1996 if nexttask in self.sqdata.outrightfail:
1997 logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
1998 self.sq_task_failoutright(nexttask)
1999 return True
2000 if nexttask in self.sqdata.unskippable:
2001 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
2002 task = nexttask
2003 break
2004 if task is not None:
2005 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
2006 taskname = taskname + "_setscene"
2007 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
2008 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
2009 self.sq_task_failoutright(task)
2010 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002011
Brad Bishop96ff1982019-08-19 13:50:42 -04002012 if self.cooker.configuration.force:
2013 if task in self.rqdata.target_tids:
2014 self.sq_task_failoutright(task)
2015 return True
2016
2017 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2018 logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
2019 self.sq_task_skip(task)
2020 return True
2021
2022 if self.cooker.configuration.skipsetscene:
2023 logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
2024 self.sq_task_failoutright(task)
2025 return True
2026
2027 startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
2028 bb.event.fire(startevent, self.cfgData)
2029
2030 taskdepdata = self.sq_build_taskdepdata(task)
2031
2032 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2033 taskhash = self.rqdata.get_task_hash(task)
2034 unihash = self.rqdata.get_task_unihash(task)
2035 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
2036 if not mc in self.rq.fakeworker:
2037 self.rq.start_fakeworker(self, mc)
2038 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2039 self.rq.fakeworker[mc].process.stdin.flush()
2040 else:
2041 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2042 self.rq.worker[mc].process.stdin.flush()
2043
2044 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2045 self.build_stamps2.append(self.build_stamps[task])
2046 self.sq_running.add(task)
2047 self.sq_live.add(task)
2048 self.sq_stats.taskActive()
2049 if self.can_start_task():
2050 return True
2051
Brad Bishopc68388fc2019-08-26 01:33:31 -04002052 self.update_holdofftasks()
2053
Brad Bishop08902b02019-08-20 09:16:51 -04002054 if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
Brad Bishop96ff1982019-08-19 13:50:42 -04002055 logger.info("Setscene tasks completed")
Brad Bishop96ff1982019-08-19 13:50:42 -04002056
Brad Bishop08902b02019-08-20 09:16:51 -04002057 err = self.summarise_scenequeue_errors()
Brad Bishop96ff1982019-08-19 13:50:42 -04002058 if err:
2059 self.rq.state = runQueueFailed
2060 return True
2061
2062 if self.cooker.configuration.setsceneonly:
2063 self.rq.state = runQueueComplete
2064 return True
2065 self.sqdone = True
2066
2067 if self.stats.total == 0:
2068 # nothing to do
2069 self.rq.state = runQueueComplete
2070 return True
2071
2072 if self.cooker.configuration.setsceneonly:
2073 task = None
2074 else:
2075 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002076 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002077 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002078
Brad Bishop96ff1982019-08-19 13:50:42 -04002079 if self.rqdata.setscenewhitelist is not None:
2080 if self.check_setscenewhitelist(task):
2081 self.task_fail(task, "setscene whitelist")
2082 return True
2083
2084 if task in self.tasks_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002085 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002086 self.task_skip(task, "covered")
2087 return True
2088
2089 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002090 logger.debug(2, "Stamp current task %s", task)
2091
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002092 self.task_skip(task, "existing")
2093 return True
2094
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002095 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002096 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2097 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2098 noexec=True)
2099 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002100 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002101 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002102 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002103 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002104 self.task_complete(task)
2105 return True
2106 else:
2107 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2108 bb.event.fire(startevent, self.cfgData)
2109
2110 taskdepdata = self.build_taskdepdata(task)
2111
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002112 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002113 taskhash = self.rqdata.get_task_hash(task)
2114 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002115 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002116 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002117 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002118 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002119 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002120 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002121 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002122 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002123 return True
Brad Bishop19323692019-04-05 15:28:33 -04002124 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002125 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002126 else:
Brad Bishop19323692019-04-05 15:28:33 -04002127 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002128 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002129
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002130 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2131 self.build_stamps2.append(self.build_stamps[task])
2132 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002133 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002134 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002135 return True
2136
Brad Bishop96ff1982019-08-19 13:50:42 -04002137 if self.stats.active > 0 or self.sq_stats.active > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002138 self.rq.read_workers()
2139 return self.rq.active_fds()
2140
Brad Bishop96ff1982019-08-19 13:50:42 -04002141 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2142 if self.sq_deferred:
2143 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2144 logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
2145 self.sq_task_failoutright(tid)
2146 return True
2147
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002148 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002149 self.rq.state = runQueueFailed
2150 return True
2151
2152 # Sanity Checks
Brad Bishop08902b02019-08-20 09:16:51 -04002153 err = self.summarise_scenequeue_errors()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002154 for task in self.rqdata.runtaskentries:
2155 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002156 logger.error("Task %s never buildable!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002157 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002158 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002159 logger.error("Task %s never ran!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002160 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002161 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002162 logger.error("Task %s never completed!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002163 err = True
2164
2165 if err:
2166 self.rq.state = runQueueFailed
2167 else:
2168 self.rq.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002169
2170 return True
2171
Brad Bishopc68388fc2019-08-26 01:33:31 -04002172 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002173 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002174 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002175 thismc = mc_from_tid(dep)
2176 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002177 continue
2178 ret.add(dep)
2179 return ret
2180
Brad Bishopa34c0302019-09-23 22:34:48 -04002181 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
Andrew Geissler99467da2019-02-25 18:54:23 -06002182 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002183 def build_taskdepdata(self, task):
2184 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002185 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002186 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002187 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002188 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002189 while next:
2190 additional = []
2191 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002192 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2193 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2194 deps = self.rqdata.runtaskentries[revdep].depends
2195 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002196 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002197 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002198 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002199 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002200 for revdep2 in deps:
2201 if revdep2 not in taskdepdata:
2202 additional.append(revdep2)
2203 next = additional
2204
2205 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2206 return taskdepdata
2207
Brad Bishop08902b02019-08-20 09:16:51 -04002208 def update_holdofftasks(self):
Brad Bishopc68388fc2019-08-26 01:33:31 -04002209
2210 if not self.holdoff_need_update:
2211 return
2212
2213 notcovered = set(self.scenequeue_notcovered)
2214 notcovered |= self.cantskip
2215 for tid in self.scenequeue_notcovered:
2216 notcovered |= self.sqdata.sq_covered_tasks[tid]
2217 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2218 notcovered.intersection_update(self.tasks_scenequeue_done)
2219
2220 covered = set(self.scenequeue_covered)
2221 for tid in self.scenequeue_covered:
2222 covered |= self.sqdata.sq_covered_tasks[tid]
2223 covered.difference_update(notcovered)
2224 covered.intersection_update(self.tasks_scenequeue_done)
2225
2226 for tid in notcovered | covered:
2227 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2228 self.setbuildable(tid)
2229 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2230 self.setbuildable(tid)
2231
2232 self.tasks_covered = covered
2233 self.tasks_notcovered = notcovered
2234
Brad Bishop08902b02019-08-20 09:16:51 -04002235 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002236
Brad Bishop08902b02019-08-20 09:16:51 -04002237 for tid in self.rqdata.runq_setscene_tids:
2238 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2239 self.holdoff_tasks.add(tid)
2240
2241 for tid in self.holdoff_tasks.copy():
2242 for dep in self.sqdata.sq_covered_tasks[tid]:
2243 if dep not in self.runq_complete:
2244 self.holdoff_tasks.add(dep)
2245
Brad Bishopc68388fc2019-08-26 01:33:31 -04002246 self.holdoff_need_update = False
2247
Brad Bishop08902b02019-08-20 09:16:51 -04002248 def process_possible_migrations(self):
2249
2250 changed = set()
2251 for tid, unihash in self.updated_taskhash_queue.copy():
2252 if tid in self.runq_running and tid not in self.runq_complete:
2253 continue
2254
2255 self.updated_taskhash_queue.remove((tid, unihash))
2256
2257 if unihash != self.rqdata.runtaskentries[tid].unihash:
2258 logger.info("Task %s unihash changed to %s" % (tid, unihash))
2259 self.rqdata.runtaskentries[tid].unihash = unihash
2260 bb.parse.siggen.set_unihash(tid, unihash)
2261
2262 # Work out all tasks which depend on this one
2263 total = set()
2264 next = set(self.rqdata.runtaskentries[tid].revdeps)
2265 while next:
2266 current = next.copy()
2267 total = total |next
2268 next = set()
2269 for ntid in current:
2270 next |= self.rqdata.runtaskentries[ntid].revdeps
2271 next.difference_update(total)
2272
2273 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2274 done = set()
2275 next = set(self.rqdata.runtaskentries[tid].revdeps)
2276 while next:
2277 current = next.copy()
2278 next = set()
2279 for tid in current:
2280 if not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2281 continue
2282 procdep = []
2283 for dep in self.rqdata.runtaskentries[tid].depends:
2284 procdep.append(dep)
2285 orighash = self.rqdata.runtaskentries[tid].hash
2286 self.rqdata.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.rqdata.dataCaches[mc_from_tid(tid)])
2287 origuni = self.rqdata.runtaskentries[tid].unihash
2288 self.rqdata.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
2289 logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, self.rqdata.runtaskentries[tid].hash, origuni, self.rqdata.runtaskentries[tid].unihash))
2290 next |= self.rqdata.runtaskentries[tid].revdeps
2291 changed.add(tid)
2292 total.remove(tid)
2293 next.intersection_update(total)
2294
2295 if changed:
2296 for mc in self.rq.worker:
2297 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2298 for mc in self.rq.fakeworker:
2299 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2300
2301 logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))
2302
2303 for tid in changed:
2304 if tid not in self.rqdata.runq_setscene_tids:
2305 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002306 if tid in self.runq_running:
2307 continue
Brad Bishop00e122a2019-10-05 11:10:57 -04002308 if tid in self.scenequeue_covered:
2309 # Potentially risky, should we report this hash as a match?
2310 logger.info("Already covered setscene for %s so ignoring rehash" % (tid))
2311 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002312 if tid not in self.pending_migrations:
2313 self.pending_migrations.add(tid)
2314
2315 for tid in self.pending_migrations.copy():
2316 valid = True
2317 # Check no tasks this covers are running
2318 for dep in self.sqdata.sq_covered_tasks[tid]:
2319 if dep in self.runq_running and dep not in self.runq_complete:
2320 logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
2321 valid = False
2322 break
2323 if not valid:
2324 continue
2325
2326 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002327 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002328
2329 if tid in self.tasks_scenequeue_done:
2330 self.tasks_scenequeue_done.remove(tid)
2331 for dep in self.sqdata.sq_covered_tasks[tid]:
2332 if dep not in self.runq_complete:
2333 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2334 self.tasks_scenequeue_done.remove(dep)
2335
2336 if tid in self.sq_buildable:
2337 self.sq_buildable.remove(tid)
2338 if tid in self.sq_running:
2339 self.sq_running.remove(tid)
2340 if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2341 if tid not in self.sq_buildable:
2342 self.sq_buildable.add(tid)
2343 if len(self.sqdata.sq_revdeps[tid]) == 0:
2344 self.sq_buildable.add(tid)
2345
2346 if tid in self.sqdata.outrightfail:
2347 self.sqdata.outrightfail.remove(tid)
2348 if tid in self.scenequeue_notcovered:
2349 self.scenequeue_notcovered.remove(tid)
2350 if tid in self.scenequeue_covered:
2351 self.scenequeue_covered.remove(tid)
2352 if tid in self.scenequeue_notneeded:
2353 self.scenequeue_notneeded.remove(tid)
2354
2355 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2356 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2357
2358 if tid in self.stampcache:
2359 del self.stampcache[tid]
2360
2361 if tid in self.build_stamps:
2362 del self.build_stamps[tid]
2363
2364 logger.info("Setscene task %s now valid and being rerun" % tid)
2365 self.sqdone = False
Brad Bishop00e122a2019-10-05 11:10:57 -04002366 update_scenequeue_data([tid], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop08902b02019-08-20 09:16:51 -04002367
2368 if changed:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002369 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002370
Brad Bishop96ff1982019-08-19 13:50:42 -04002371 def scenequeue_updatecounters(self, task, fail=False):
Brad Bishop08902b02019-08-20 09:16:51 -04002372
2373 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002374 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002375 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002376 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002377 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002378 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2379 if dep not in self.sq_buildable:
2380 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002381
Brad Bishop96ff1982019-08-19 13:50:42 -04002382 next = set([task])
2383 while next:
2384 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002385 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002386 self.tasks_scenequeue_done.add(t)
2387 # Look down the dependency chain for non-setscene things which this task depends on
2388 # and mark as 'done'
2389 for dep in self.rqdata.runtaskentries[t].depends:
2390 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2391 continue
2392 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2393 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002394 next = new
2395
Brad Bishopc68388fc2019-08-26 01:33:31 -04002396 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002397
2398 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002399 """
2400 Mark a task as completed
2401 Look at the reverse dependencies and mark any task with
2402 completed dependencies as buildable
2403 """
2404
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002405 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002406 self.scenequeue_covered.add(task)
2407 self.scenequeue_updatecounters(task)
2408
Brad Bishop96ff1982019-08-19 13:50:42 -04002409 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002410 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002411 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002412 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2413 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002414 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2415 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2416 self.rq.state = runQueueCleanUp
2417
Brad Bishop96ff1982019-08-19 13:50:42 -04002418 def sq_task_complete(self, task):
2419 self.sq_stats.taskCompleted()
2420 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2421 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002422
Brad Bishop96ff1982019-08-19 13:50:42 -04002423 def sq_task_fail(self, task, result):
2424 self.sq_stats.taskFailed()
2425 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002426 self.scenequeue_notcovered.add(task)
2427 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002428 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002429
Brad Bishop96ff1982019-08-19 13:50:42 -04002430 def sq_task_failoutright(self, task):
2431 self.sq_running.add(task)
2432 self.sq_buildable.add(task)
2433 self.sq_stats.taskSkipped()
2434 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002435 self.scenequeue_notcovered.add(task)
2436 self.scenequeue_updatecounters(task, True)
2437
Brad Bishop96ff1982019-08-19 13:50:42 -04002438 def sq_task_skip(self, task):
2439 self.sq_running.add(task)
2440 self.sq_buildable.add(task)
2441 self.sq_task_completeoutright(task)
2442 self.sq_stats.taskSkipped()
2443 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002444
Brad Bishop96ff1982019-08-19 13:50:42 -04002445 def sq_build_taskdepdata(self, task):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002446 def getsetscenedeps(tid):
2447 deps = set()
2448 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2449 realtid = tid + "_setscene"
2450 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2451 for (depname, idependtask) in idepends:
2452 if depname not in self.rqdata.taskData[mc].build_targets:
2453 continue
2454
2455 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2456 if depfn is None:
2457 continue
2458 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2459 deps.add(deptid)
2460 return deps
2461
2462 taskdepdata = {}
2463 next = getsetscenedeps(task)
2464 next.add(task)
2465 while next:
2466 additional = []
2467 for revdep in next:
2468 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2469 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2470 deps = getsetscenedeps(revdep)
2471 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2472 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002473 unihash = self.rqdata.runtaskentries[revdep].unihash
2474 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002475 for revdep2 in deps:
2476 if revdep2 not in taskdepdata:
2477 additional.append(revdep2)
2478 next = additional
2479
2480 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2481 return taskdepdata
2482
Brad Bishop96ff1982019-08-19 13:50:42 -04002483 def check_setscenewhitelist(self, tid):
2484 # Check task that is going to run against the whitelist
2485 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2486 # Ignore covered tasks
2487 if tid in self.tasks_covered:
2488 return False
2489 # Ignore stamped tasks
2490 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2491 return False
2492 # Ignore noexec tasks
2493 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2494 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2495 return False
2496
2497 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2498 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2499 if tid in self.rqdata.runq_setscene_tids:
2500 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2501 else:
2502 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
2503 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2504 return True
2505 return False
2506
class SQData(object):
    """Container for the state of setscene ("scenequeue") processing."""
    def __init__(self):
        # Dependencies between setscene tasks
        self.sq_deps = {}
        # Reverse dependencies between setscene tasks
        self.sq_revdeps = {}
        # Inter-setscene dependencies injected via [depends] flags
        self.sq_harddeps = {}
        # Stamp files, cached so duplicate stamps can't run in parallel
        self.stamps = {}
        # Setscene tasks the build directly depends upon
        self.unskippable = set()
        # Setscene tasks known to have no artefact present
        self.outrightfail = set()
        # Map of setscene task -> the normal tasks it covers
        self.sq_covered_tasks = {}
2523
2524def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
2525
2526 sq_revdeps = {}
2527 sq_revdeps_squash = {}
2528 sq_collated_deps = {}
2529
2530 # We need to construct a dependency graph for the setscene functions. Intermediate
2531 # dependencies between the setscene tasks only complicate the code. This code
2532 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2533 # only containing the setscene functions.
2534
2535 rqdata.init_progress_reporter.next_stage()
2536
2537 # First process the chains up to the first setscene task.
2538 endpoints = {}
2539 for tid in rqdata.runtaskentries:
2540 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2541 sq_revdeps_squash[tid] = set()
2542 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2543 #bb.warn("Added endpoint %s" % (tid))
2544 endpoints[tid] = set()
2545
2546 rqdata.init_progress_reporter.next_stage()
2547
2548 # Secondly process the chains between setscene tasks.
2549 for tid in rqdata.runq_setscene_tids:
2550 sq_collated_deps[tid] = set()
2551 #bb.warn("Added endpoint 2 %s" % (tid))
2552 for dep in rqdata.runtaskentries[tid].depends:
2553 if tid in sq_revdeps[dep]:
2554 sq_revdeps[dep].remove(tid)
2555 if dep not in endpoints:
2556 endpoints[dep] = set()
2557 #bb.warn(" Added endpoint 3 %s" % (dep))
2558 endpoints[dep].add(tid)
2559
2560 rqdata.init_progress_reporter.next_stage()
2561
2562 def process_endpoints(endpoints):
2563 newendpoints = {}
2564 for point, task in endpoints.items():
2565 tasks = set()
2566 if task:
2567 tasks |= task
2568 if sq_revdeps_squash[point]:
2569 tasks |= sq_revdeps_squash[point]
2570 if point not in rqdata.runq_setscene_tids:
2571 for t in tasks:
2572 sq_collated_deps[t].add(point)
2573 sq_revdeps_squash[point] = set()
2574 if point in rqdata.runq_setscene_tids:
2575 sq_revdeps_squash[point] = tasks
2576 tasks = set()
2577 continue
2578 for dep in rqdata.runtaskentries[point].depends:
2579 if point in sq_revdeps[dep]:
2580 sq_revdeps[dep].remove(point)
2581 if tasks:
2582 sq_revdeps_squash[dep] |= tasks
2583 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2584 newendpoints[dep] = task
2585 if len(newendpoints) != 0:
2586 process_endpoints(newendpoints)
2587
2588 process_endpoints(endpoints)
2589
2590 rqdata.init_progress_reporter.next_stage()
2591
Brad Bishop08902b02019-08-20 09:16:51 -04002592 # Build a list of tasks which are "unskippable"
2593 # These are direct endpoints referenced by the build upto and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002594 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2595 new = True
2596 for tid in rqdata.runtaskentries:
2597 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2598 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002599 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002600 while new:
2601 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002602 orig = sqdata.unskippable.copy()
2603 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002604 if tid in rqdata.runq_setscene_tids:
2605 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002606 if len(rqdata.runtaskentries[tid].depends) == 0:
2607 # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002608 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002609 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002610 if sqdata.unskippable != orig:
2611 new = True
2612
2613 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002614
2615 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2616
2617 # Sanity check all dependencies could be changed to setscene task references
2618 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2619 if tid in rqdata.runq_setscene_tids:
2620 pass
2621 elif len(sq_revdeps_squash[tid]) != 0:
2622 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2623 else:
2624 del sq_revdeps_squash[tid]
2625 rqdata.init_progress_reporter.update(taskcounter)
2626
2627 rqdata.init_progress_reporter.next_stage()
2628
2629 # Resolve setscene inter-task dependencies
2630 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2631 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2632 for tid in rqdata.runq_setscene_tids:
2633 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2634 realtid = tid + "_setscene"
2635 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2636 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2637 for (depname, idependtask) in idepends:
2638
2639 if depname not in rqdata.taskData[mc].build_targets:
2640 continue
2641
2642 depfn = rqdata.taskData[mc].build_targets[depname][0]
2643 if depfn is None:
2644 continue
2645 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2646 if deptid not in rqdata.runtaskentries:
2647 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2648
2649 if not deptid in sqdata.sq_harddeps:
2650 sqdata.sq_harddeps[deptid] = set()
2651 sqdata.sq_harddeps[deptid].add(tid)
2652
2653 sq_revdeps_squash[tid].add(deptid)
2654 # Have to zero this to avoid circular dependencies
2655 sq_revdeps_squash[deptid] = set()
2656
2657 rqdata.init_progress_reporter.next_stage()
2658
2659 for task in sqdata.sq_harddeps:
2660 for dep in sqdata.sq_harddeps[task]:
2661 sq_revdeps_squash[dep].add(task)
2662
2663 rqdata.init_progress_reporter.next_stage()
2664
2665 #for tid in sq_revdeps_squash:
2666 # data = ""
2667 # for dep in sq_revdeps_squash[tid]:
2668 # data = data + "\n %s" % dep
2669 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2670
2671 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002672 sqdata.sq_covered_tasks = sq_collated_deps
2673
2674 # Build reverse version of revdeps to populate deps structure
2675 for tid in sqdata.sq_revdeps:
2676 sqdata.sq_deps[tid] = set()
2677 for tid in sqdata.sq_revdeps:
2678 for dep in sqdata.sq_revdeps[tid]:
2679 sqdata.sq_deps[dep].add(tid)
2680
2681 rqdata.init_progress_reporter.next_stage()
2682
Brad Bishop00e122a2019-10-05 11:10:57 -04002683 sqdata.multiconfigs = set()
Brad Bishop96ff1982019-08-19 13:50:42 -04002684 for tid in sqdata.sq_revdeps:
Brad Bishop00e122a2019-10-05 11:10:57 -04002685 sqdata.multiconfigs.add(mc_from_tid(tid))
Brad Bishop96ff1982019-08-19 13:50:42 -04002686 if len(sqdata.sq_revdeps[tid]) == 0:
2687 sqrq.sq_buildable.add(tid)
2688
2689 rqdata.init_progress_reporter.finish()
2690
Brad Bishop00e122a2019-10-05 11:10:57 -04002691 sqdata.noexec = set()
2692 sqdata.stamppresent = set()
2693 sqdata.valid = set()
Brad Bishop96ff1982019-08-19 13:50:42 -04002694
Brad Bishop00e122a2019-10-05 11:10:57 -04002695 update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq)
2696
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq):
    """
    Classify each setscene task in tids as noexec, already stamped, valid
    (restorable from sstate) or an expected outright failure, updating the
    sqdata and sqrq state in place.

    tids: iterable of task ids ("<fn>:<taskname>") to (re)examine
    sqdata: scene queue data store; stamppresent/valid/noexec/outrightfail/
            hashes/sq_deferred are mutated here
    rqdata, rq, cooker, stampcache, sqrq: runqueue context objects
    """

    # Tasks whose hashes still need validating against the sstate backend
    tocheck = set()

    for tid in sorted(tids):
        # Drop any stale classification from a previous pass so this call
        # recomputes the task's state from scratch.
        if tid in sqdata.stamppresent:
            sqdata.stamppresent.remove(tid)
        if tid in sqdata.valid:
            sqdata.valid.remove(tid)

        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

        taskdep = rqdata.dataCaches[mc].task_deps[taskfn]

        # noexec tasks have nothing to restore; write their setscene stamp
        # immediately and mark them skipped.
        if 'noexec' in taskdep and taskname in taskdep['noexec']:
            sqdata.noexec.add(tid)
            sqrq.sq_task_skip(tid)
            bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
            continue

        # A current setscene stamp means the sstate restore already happened.
        if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
            logger.debug(2, 'Setscene stamp current for task %s', tid)
            sqdata.stamppresent.add(tid)
            sqrq.sq_task_skip(tid)
            continue

        # A current stamp for the real task (checked recursively through its
        # dependencies) also makes the setscene task redundant.
        if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
            logger.debug(2, 'Normal stamp current for task %s', tid)
            sqdata.stamppresent.add(tid)
            sqrq.sq_task_skip(tid)
            continue

        tocheck.add(tid)

    # Ask the hash validation backend which remaining tasks have usable
    # sstate objects available.
    sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False)

    sqdata.hashes = {}
    for mc in sorted(sqdata.multiconfigs):
        for tid in sorted(sqdata.sq_revdeps):
            if mc_from_tid(tid) != mc:
                continue
            if tid in sqdata.stamppresent:
                continue
            if tid in sqdata.valid:
                continue
            if tid in sqdata.noexec:
                continue
            if tid in sqrq.scenequeue_notcovered:
                continue
            # Not skipped and no valid sstate: the setscene task is expected
            # to fail, so the real task will have to run.
            sqdata.outrightfail.add(tid)

            # Tasks sharing a pending hash are deferred behind the first one
            # seen so only one of them runs ahead of the others.
            # NOTE(review): a deferred tid is also left in outrightfail just
            # above - confirm that double-classification is intended.
            h = pending_hash_index(tid, rqdata)
            if h not in sqdata.hashes:
                sqdata.hashes[h] = tid
            else:
                sqrq.sq_deferred[tid] = sqdata.hashes[h]
                bb.warn("Deferring %s after %s" % (tid, sqdata.hashes[h]))
Brad Bishop96ff1982019-08-19 13:50:42 -04002754
2755
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails to execute successfully."""

    def __init__(self, x):
        # Store the payload directly as the exception's args, without the
        # tuple-wrapping Exception.__init__ would apply.
        self.args = x
2762
2763
class runQueueExitWait(bb.event.Event):
    """Fired while the runqueue waits for active task processes to exit."""

    def __init__(self, remain):
        # Number of still-active task processes being waited on.
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        super().__init__()
2773
class runQueueEvent(bb.event.Event):
    """Base class for events describing a single runqueue task."""

    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the stats so later runqueue updates don't mutate the event.
        self.stats = stats.copy()
        super().__init__()
2786
class sceneQueueEvent(runQueueEvent):
    """Base class for events describing a single setscene (sceneQueue) task."""

    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # Override the base fields with their "_setscene" variants.
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002797
class runQueueTaskStarted(runQueueEvent):
    """Notifies that execution of a task has started."""

    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task body will not actually be executed.
        self.noexec = noexec
2805
class sceneQueueTaskStarted(sceneQueueEvent):
    """Notifies that execution of a setscene task has started."""

    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task body will not actually be executed.
        self.noexec = noexec
2813
class runQueueTaskFailed(runQueueEvent):
    """Notifies that a task failed, carrying its exit code."""

    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2824
class sceneQueueTaskFailed(sceneQueueEvent):
    """Notifies that a setscene task failed, carrying its exit code."""

    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2835
class sceneQueueComplete(sceneQueueEvent):
    """Fired once every sceneQueue task has completed."""

    def __init__(self, stats, rq):
        # Deliberately bypass sceneQueueEvent/runQueueEvent initialisation:
        # this event has no single associated task, only aggregate stats.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2843
class runQueueTaskCompleted(runQueueEvent):
    """Notifies that a task completed successfully."""
2848
class sceneQueueTaskCompleted(sceneQueueEvent):
    """Notifies that a setscene task completed successfully."""
2853
class runQueueTaskSkipped(runQueueEvent):
    """Notifies that a task was skipped, carrying the reason."""

    def __init__(self, task, stats, rq, reason):
        super().__init__(task, stats, rq)
        self.reason = reason
2861
class taskUniHashUpdate(bb.event.Event):
    """Carries an updated unihash for a task from a worker back to the server."""

    def __init__(self, task, unihash):
        self.taskid = task
        self.unihash = unihash
        super().__init__()
2870
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    Buffers the worker's byte stream and extracts <event>...</event> and
    <exitcode>...</exitcode> pickled payloads as they become complete.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # Read end of the pipe; the write end belongs to the worker.
        self.input = pipein
        if pipeout:
            pipeout.close()
        # Non-blocking reads so read() can be polled without stalling.
        bb.utils.nonblockingfd(self.input)
        # Accumulated, not-yet-parsed bytes from the worker.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Swap in a new runqueue executor (e.g. scenequeue -> real tasks).
        self.rqexec = rqexec

    def read(self):
        """Drain available pipe data and dispatch any complete messages.

        Returns True if any new bytes were read from the pipe.
        """
        # First, detect worker processes that died unexpectedly.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data available on the non-blocking fd.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep extracting messages until a pass finds nothing complete.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            # Payload sits between the 7-byte "<event>" prefix and the
            # closing tag; a partial message stays queued for the next read.
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    # NOTE(review): pickle.UnpicklingError is not a
                    # ValueError, so some decode failures would propagate
                    # uncaught - confirm whether that is intended.
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                if isinstance(event, taskUniHashUpdate):
                    # Queue unihash updates for the executor to process.
                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
                found = True
                # Consume payload plus the 8-byte closing tag.
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            # Same framing for exit codes: 10-byte "<exitcode>" prefix,
            # 11-byte closing tag.
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain everything still in flight before closing the fd.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002936
def get_setscene_enforce_whitelist(d):
    """
    Return the expanded BB_SETSCENE_ENFORCE_WHITELIST as a list, or None
    when setscene enforcement (BB_SETSCENE_ENFORCE != '1') is disabled.

    Entries of the form '%:taskname' are expanded once per non-option
    command-line target into '<target>:taskname'.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    outlist = []
    for item in (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split():
        if not item.startswith('%:'):
            outlist.append(item)
            continue
        # '%' is a placeholder for each target named on the command line.
        taskname = item.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                outlist.append(target.split(':')[0] + ':' + taskname)
    return outlist
2950
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname is allowed to run as a real (non-setscene)
    task under setscene enforcement.

    A None whitelist means enforcement is disabled, so everything is
    allowed; otherwise the task must fnmatch one of the whitelist patterns.
    """
    import fnmatch
    if whitelist is None:
        return True
    candidate = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(candidate, pattern) for pattern in whitelist)
2959 return True