"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007  Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import re
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process
import shlex
import pprint

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
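# Note (illustrative, not from the original source): the pattern above matches
# a standalone 64-character hex string such as a sha256 task hash; the
# look-behind/look-ahead assertions stop it matching hex runs embedded in
# longer alphanumeric tokens.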

def fn_from_tid(tid):
    return tid.rsplit(":", 1)[0]

def taskname_from_tid(tid):
    return tid.rsplit(":", 1)[1]

def mc_from_tid(tid):
    if tid.startswith('mc:'):
        return tid.split(':')[1]
    return ""

def split_tid(tid):
    (mc, fn, taskname, _) = split_tid_mcfn(tid)
    return (mc, fn, taskname)

def split_tid_mcfn(tid):
    if tid.startswith('mc:'):
        elems = tid.split(':')
        mc = elems[1]
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        mcfn = "mc:" + mc + ":" + fn
    else:
        tid = tid.rsplit(":", 1)
        mc = ""
        fn = tid[0]
        taskname = tid[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)

def build_tid(mc, fn, taskname):
    if mc:
        return "mc:" + mc + ":" + fn + ":" + taskname
    return fn + ":" + taskname

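# Illustrative examples (not part of the original source) of the tid formats
# handled by the helpers above; the recipe path and multiconfig name are
# hypothetical:
#
#   split_tid_mcfn("/path/to/foo.bb:do_compile")
#       -> ('', '/path/to/foo.bb', 'do_compile', '/path/to/foo.bb')
#   split_tid_mcfn("mc:qemuarm:/path/to/foo.bb:do_compile")
#       -> ('qemuarm', '/path/to/foo.bb', 'do_compile', 'mc:qemuarm:/path/to/foo.bb')
#   build_tid("qemuarm", "/path/to/foo.bb", "do_compile")
#       -> 'mc:qemuarm:/path/to/foo.bb:do_compile'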
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].hash
    return pn + ":" + "taskname" + h

class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self):
        self.active = self.active - 1
        self.completed = self.completed + 1

    def taskSkipped(self):
        self.active = self.active + 1
        self.skipped = self.skipped + 1

    def taskActive(self):
        self.active = self.active + 1

# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
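# Sketch of the intended progression (inferred from the values above, not an
# original comment): a build normally moves runQueuePrepare -> runQueueSceneInit
# -> runQueueRunning -> runQueueComplete, with runQueueFailed and
# runQueueCleanUp used on the failure/interruption paths.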

class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.add(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.runq_running)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

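    # Illustrative note (not from the original source): the per-task throttling
    # in next_buildable_task() is driven by a "number_threads" varflag on the
    # task name, e.g. a hypothetical configuration line
    #     do_fetch[number_threads] = "4"
    # would stop more than four do_fetch tasks from being handed out at once.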
    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.add(task)

    def removebuildable(self, task):
        self.buildable.remove(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        weights = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            if not weight in weights:
                weights[weight] = []
            weights[weight].append(tid)

        self.prio_map = []
        for weight in sorted(weights):
            for w in weights[weight]:
                self.prio_map.append(w)

        self.prio_map.reverse()
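        # In effect (a paraphrase, not an original comment): prio_map is now
        # ordered from heaviest weight to lightest, roughly what sorting the
        # tids by descending weight would give, so tasks that many other tasks
        # depend on are offered to the executor first.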

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronize by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)
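        # Illustrative example (not from the original source): merging
        #   ['do_fetch', 'do_unpack', 'do_compile', 'do_build'] with
        #   ['do_fetch', 'do_compile', 'do_install', 'do_build']
        # yields
        #   ['do_fetch', 'do_unpack', 'do_compile', 'do_install', 'do_build'],
        # a superset which preserves the relative order of the common tasks.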
309
310 # Now reverse the order so that tasks that finish the work on one
311 # recipe are considered more imporant (= come first). The ordering
312 # is now so that do_build is most important.
313 all_tasks.reverse()
314
315 # Group tasks of the same kind before tasks of less important
316 # kinds at the head of the queue (because earlier = lower
317 # priority number = runs earlier), while preserving the
318 # ordering by recipe. If recipe foo is more important than
319 # bar, then the goal is to work on foo's do_populate_sysroot
320 # before bar's do_populate_sysroot and on the more important
321 # tasks of foo before any of the less important tasks in any
322 # other recipe (if those other recipes are more important than
323 # foo).
324 #
325 # All of this only applies when tasks are runable. Explicit
326 # dependencies still override this ordering by priority.
327 #
328 # Here's an example why this priority re-ordering helps with
329 # minimizing disk usage. Consider a recipe foo with a higher
330 # priority than bar where foo DEPENDS on bar. Then the
331 # implicit rule (from base.bbclass) is that foo's do_configure
332 # depends on bar's do_populate_sysroot. This ensures that
333 # bar's do_populate_sysroot gets done first. Normally the
334 # tasks from foo would continue to run once that is done, and
335 # bar only gets completed and cleaned up later. By ordering
336 # bar's task that depend on bar's do_populate_sysroot before foo's
337 # do_configure, that problem gets avoided.
338 task_index = 0
339 self.dump_prio('original priorities')
340 for task in all_tasks:
341 for index in range(task_index, self.numTasks):
342 taskid = self.prio_map[index]
343 taskname = taskid.rsplit(':', 1)[1]
344 if taskname == task:
345 del self.prio_map[index]
346 self.prio_map.insert(task_index, taskid)
347 task_index += 1
348 self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500349
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600350class RunTaskEntry(object):
351 def __init__(self):
352 self.depends = set()
353 self.revdeps = set()
354 self.hash = None
Brad Bishop19323692019-04-05 15:28:33 -0400355 self.unihash = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600356 self.task = None
357 self.weight = 1
358
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500359class RunQueueData:
360 """
361 BitBake Run Queue implementation
362 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600363 def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500364 self.cooker = cooker
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600365 self.dataCaches = dataCaches
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500366 self.taskData = taskData
367 self.targets = targets
368 self.rq = rq
369 self.warn_multi_bb = False
370
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500371 self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
372 self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600373 self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
374 self.setscenewhitelist_checked = False
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500375 self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600376 self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500377
378 self.reset()
379
380 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600381 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500382
383 def runq_depends_names(self, ids):
384 import re
385 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600386 for id in ids:
387 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500388 nam = re.sub("_[^,]*,", ",", nam)
389 ret.extend([nam])
390 return ret
391
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600392 def get_task_hash(self, tid):
393 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500394
Brad Bishop19323692019-04-05 15:28:33 -0400395 def get_task_unihash(self, tid):
396 return self.runtaskentries[tid].unihash
397
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600398 def get_user_idstring(self, tid, task_name_suffix = ""):
399 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500400
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500401 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500402 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
403 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600404 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500405 return "%s:%s" % (pn, taskname)
406
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500407 def circular_depchains_handler(self, tasks):
408 """
409 Some tasks aren't buildable, likely due to circular dependency issues.
410 Identify the circular dependencies and print them in a user readable format.
411 """
412 from copy import deepcopy
413
414 valid_chains = []
415 explored_deps = {}
416 msgs = []
417
Andrew Geissler99467da2019-02-25 18:54:23 -0600418 class TooManyLoops(Exception):
419 pass
420
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500421 def chain_reorder(chain):
422 """
423 Reorder a dependency chain so the lowest task id is first
424 """
425 lowest = 0
426 new_chain = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600427 for entry in range(len(chain)):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500428 if chain[entry] < chain[lowest]:
429 lowest = entry
430 new_chain.extend(chain[lowest:])
431 new_chain.extend(chain[:lowest])
432 return new_chain
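            # Illustrative example (not from the original source): the rotation
            # above makes a given loop compare equal no matter which task it was
            # first discovered from, e.g. with hypothetical tids
            #   chain_reorder(['z.bb:do_c', 'a.bb:do_a', 'm.bb:do_b'])
            #       -> ['a.bb:do_a', 'm.bb:do_b', 'z.bb:do_c']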

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that are not
        possible to execute due to circular dependencies.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
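        # Worked example (not from the original source): for a linear chain
        # do_compile <- do_install <- do_build where do_build is the endpoint,
        # the loop above gives weight[do_build] = 10,
        # weight[do_install] = 1 + 10 = 11 and weight[do_compile] = 1 + 11 = 12,
        # so tasks that more other tasks depend on come out heavier and are run
        # earlier by the "speed" scheduler.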

    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = {}
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        found = False
        for mc in self.taskData:
            if len(taskData[mc].taskentries) > 0:
                found = True
                break
        if not found:
            # Nothing to do
            return 0

        self.init_progress_reporter.start()
        self.init_progress_reporter.next_stage()

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        def add_build_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
                    continue
                depdata = taskData[mc].build_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_runtime_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
                    continue
                depdata = taskData[mc].run_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_mc_dependencies(mc, tid):
            mcdeps = taskData[mc].get_mcdepends()
            for dep in mcdeps:
                mcdependency = dep.split(':')
                pn = mcdependency[3]
                frommc = mcdependency[1]
                mcdep = mcdependency[2]
                deptask = mcdependency[4]
                if mc == frommc:
                    fn = taskData[mcdep].build_targets[pn][0]
                    newdep = '%s:%s' % (fn,deptask)
                    taskData[mc].taskentries[tid].tdepends.append(newdep)

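        # Illustrative example (not from the original source): a hypothetical
        # recipe setting
        #     do_image[mcdepends] = "mc:mc1:mc2:core-image-minimal:do_rootfs"
        # is split by add_mc_dependencies() above into frommc="mc1",
        # mcdep="mc2", pn="core-image-minimal" and deptask="do_rootfs", giving
        # the mc1 task a dependency on mc2's core-image-minimal:do_rootfs.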
        for mc in taskData:
            for tid in taskData[mc].taskentries:

                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                #runtid = build_tid(mc, fn, taskname)

                #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)

                depends = set()
                task_deps = self.dataCaches[mc].task_deps[taskfn]

                self.runtaskentries[tid] = RunTaskEntry()

                if fn in taskData[mc].failed_fns:
                    continue

                # We add multiconfig dependencies before processing internal task deps (tdepends)
                if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
                    add_mc_dependencies(mc, tid)

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                for t in taskData[mc].taskentries[tid].tdepends:
                    (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
                    depends.add(build_tid(depmc, depfn, deptaskname))

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskname in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskname].split()
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskname].split()
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData[mc].taskentries[tid].idepends
                for (depname, idependtask) in idepends:
                    if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData[mc].build_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
                irdepends = taskData[mc].taskentries[tid].irdepends
                for (depname, idependtask) in irdepends:
                    if depname in taskData[mc].run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        if not taskData[mc].run_targets[depname]:
                            continue
                        depdata = taskData[mc].run_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskname].split()
                    recursivetasks[tid] = tasknames
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
                    if taskname in tasknames:
                        recursivetasksselfref.add(tid)

                if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
                    recursiveitasks[tid] = []
                    for t in task_deps['recideptask'][taskname].split():
                        newdep = build_tid(mc, fn, t)
                        recursiveitasks[tid].append(newdep)

                self.runtaskentries[tid].depends = depends
                # Remove all self references
                self.runtaskentries[tid].depends.discard(tid)

        #self.dump_data()

        self.init_progress_reporter.next_stage()

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed

        # Generating/iterating recursive lists of dependencies is painful and potentially slow
        # Precompute recursive task dependencies here by:
        #     a) create a temp list of reverse dependencies (revdeps)
        #     b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
        #     c) combine the total list of dependencies in cumulativedeps
        #     d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)


        revdeps = {}
        deps = {}
        cumulativedeps = {}
        for tid in self.runtaskentries:
            deps[tid] = set(self.runtaskentries[tid].depends)
            revdeps[tid] = set()
            cumulativedeps[tid] = set()
        # Generate a temp list of reverse dependencies
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                revdeps[dep].add(tid)
        # Find the dependency chain endpoints
        endpoints = set()
        for tid in self.runtaskentries:
            if len(deps[tid]) == 0:
                endpoints.add(tid)
        # Iterate the chains collating dependencies
        while endpoints:
            next = set()
            for tid in endpoints:
                for dep in revdeps[tid]:
                    cumulativedeps[dep].add(fn_from_tid(tid))
                    cumulativedeps[dep].update(cumulativedeps[tid])
                    if tid in deps[dep]:
                        deps[dep].remove(tid)
                    if len(deps[dep]) == 0:
                        next.add(dep)
            endpoints = next
        #for tid in deps:
        #    if len(deps[tid]) != 0:
        #        bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
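        # At this point (a paraphrase of steps a)-d) above, not an original
        # comment) cumulativedeps[tid] holds the recipe filenames of every task
        # tid transitively depends on, with the ":taskname" suffix already
        # stripped, ready for the recrdeptask expansion loop below.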

        # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
        # resolve these recursively until we aren't adding any further extra dependencies
        extradeps = True
        while extradeps:
            extradeps = 0
            for tid in recursivetasks:
                tasknames = recursivetasks[tid]

                totaldeps = set(self.runtaskentries[tid].depends)
                if tid in recursiveitasks:
                    totaldeps.update(recursiveitasks[tid])
                    for dep in recursiveitasks[tid]:
                        if dep not in self.runtaskentries:
                            continue
                        totaldeps.update(self.runtaskentries[dep].depends)

                deps = set()
                for dep in totaldeps:
                    if dep in cumulativedeps:
                        deps.update(cumulativedeps[dep])

                for t in deps:
                    for taskname in tasknames:
                        newtid = t + ":" + taskname
                        if newtid == tid:
                            continue
                        if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

                # Handle recursive tasks which depend upon other recursive tasks
                deps = set()
                for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
                    deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
                for newtid in deps:
                    for taskname in tasknames:
                        if not newtid.endswith(":" + taskname):
                            continue
                        if newtid in self.runtaskentries:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

            bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)

        # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for tid in recursivetasksselfref:
            self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)

        self.init_progress_reporter.next_stage()

        #self.dump_data()

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_tids = []
        for (mc, target, task, fn) in self.targets:

            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
                continue

            if target in taskData[mc].failed_deps:
                continue

            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            if fn in taskData[mc].failed_fns:
                continue

            # fn already has mc prefix
            tid = fn + ":" + task
            self.target_tids.append(tid)
            if tid not in taskData[mc].taskentries:
                import difflib
                tasks = []
                for x in taskData[mc].taskentries:
                    if x.startswith(fn + ":"):
                        tasks.append(taskname_from_tid(x))
                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n  %s" % "\n  ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))

            # For tasks called "XXXX-", only run their dependencies
            if parents:
                for i in self.runtaskentries[tid].depends:
                    mark_active(i, 1)
            else:
                mark_active(tid, 1)

        self.init_progress_reporter.next_stage()

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        delcount = {}
        for tid in list(self.runtaskentries.keys()):
            if tid not in runq_build:
                delcount[tid] = self.runtaskentries[tid]
                del self.runtaskentries[tid]

        # Handle --runall
        if self.cooker.configuration.runall:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runall:
                runall_tids = set()
                for tid in list(self.runtaskentries):
                    wanttid = fn_from_tid(tid) + ":do_%s" % task
                    if wanttid in delcount:
                        self.runtaskentries[wanttid] = delcount[wanttid]
                    if wanttid in self.runtaskentries:
                        runall_tids.add(wanttid)

                for tid in list(runall_tids):
                    mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))

        self.init_progress_reporter.next_stage()

        # Handle runonly
        if self.cooker.configuration.runonly:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runonly:
                runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }

                for tid in list(runonly_tids):
                    mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runtaskentries) == 0:
            if not taskData[''].abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))

        logger.verbose("Assign Weightings")

        self.init_progress_reporter.next_stage()

        # Generate a list of reverse dependencies to ease future calculations
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                self.runtaskentries[dep].revdeps.add(tid)

        self.init_progress_reporter.next_stage()

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for tid in self.runtaskentries:
            revdeps = self.runtaskentries[tid].revdeps
            if len(revdeps) == 0:
                endpoints.append(tid)
            for dep in revdeps:
                if dep in self.runtaskentries[tid].depends:
                    bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))


        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        self.init_progress_reporter.next_stage()

        # Calculate task weights
        # Check for higher length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        self.init_progress_reporter.next_stage()

        # Sanity Check - Check for multiple tasks building the same provider
        for mc in self.dataCaches:
            prov_list = {}
            seen_fn = []
            for tid in self.runtaskentries:
                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                if taskfn in seen_fn:
                    continue
                if mc != tidmc:
                    continue
                seen_fn.append(taskfn)
                for prov in self.dataCaches[mc].fn_provides[taskfn]:
                    if prov not in prov_list:
                        prov_list[prov] = [taskfn]
                    elif taskfn not in prov_list[prov]:
                        prov_list[prov].append(taskfn)
            for prov in prov_list:
                if len(prov_list[prov]) < 2:
                    continue
                if prov in self.multi_provider_whitelist:
                    continue
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCaches[mc].pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n  %s" % (prov, "\n  ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for tid in self.runtaskentries:
                        fn = fn_from_tid(tid)
                        if fn != provfn:
                            continue
                        for dep in self.runtaskentries[tid].revdeps:
                            fn = fn_from_tid(dep)
                            if fn == provfn:
                                continue
                            deps.add(dep)
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n  %s" % (provfn, "\n  ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCaches[mc].fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCaches[mc].rproviders:
                        if provfn in self.dataCaches[mc].rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCaches[mc].packages:
                        if provfn in self.dataCaches[mc].packages[package]:
                            rprovides.add(package)
                    for package in self.dataCaches[mc].packages_dynamic:
                        if provfn in self.dataCaches[mc].packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n  %s" % ("\n  ".join(commonprovs))
                #msg += "\nCommon rprovides:\n  %s" % ("\n  ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n  %s" % (provfn, "\n  ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n  %s" % (provfn, "\n  ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.verbnote(msg)
                else:
                    logger.error(msg)

        self.init_progress_reporter.next_stage()

        # Create a whitelist usable by the stamp checks
        self.stampfnwhitelist = {}
        for mc in self.taskData:
            self.stampfnwhitelist[mc] = []
            for entry in self.stampwhitelist.split():
                if entry not in self.taskData[mc].build_targets:
                    continue
1102 fn = self.taskData.build_targets[entry][0]
1103 self.stampfnwhitelist[mc].append(fn)
1104
1105 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001106
1107 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001108 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001110 for tid in self.runtaskentries:
1111 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001112 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001113 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001114 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001115 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001116
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001117 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001118 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1119 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001120 if fn + ":" + taskname not in taskData[mc].taskentries:
1121 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001122 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1123 if error_nostamp:
1124 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1125 else:
1126 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1127 else:
1128 logger.verbose("Invalidate task %s, %s", taskname, fn)
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001129 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001130
1131 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132
1133 # Invalidate task if force mode active
1134 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 for tid in self.target_tids:
1136 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137
1138 # Invalidate task if invalidate mode active
1139 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001140 for tid in self.target_tids:
1141 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142 for st in self.cooker.configuration.invalidate_stamp.split(','):
1143 if not st.startswith("do_"):
1144 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 invalidate_task(fn + ":" + st, True)
1146
1147 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001148
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001149 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001150 for mc in taskData:
1151 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1152 virtpnmap = {}
1153 for v in virtmap:
1154 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1155 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1156 if hasattr(bb.parse.siggen, "tasks_resolved"):
1157 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1158
1159 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001160
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001161 # Iterate over the task list and call into the siggen code
1162 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001163 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001164 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001165 for tid in todeal.copy():
1166 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1167 dealtwith.add(tid)
1168 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001169 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001170
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001171 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001172
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001173 #self.dump_data()
1174 return len(self.runtaskentries)
1175
Brad Bishop19323692019-04-05 15:28:33 -04001176 def prepare_task_hash(self, tid):
1177 procdep = []
1178 for dep in self.runtaskentries[tid].depends:
Brad Bishop08902b02019-08-20 09:16:51 -04001179 procdep.append(dep)
1180 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.dataCaches[mc_from_tid(tid)])
1181 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001182
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001183 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001184 """
1185 Dump some debug information on the internal data structures
1186 """
1187 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001188 for tid in self.runtaskentries:
1189 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1190 self.runtaskentries[tid].weight,
1191 self.runtaskentries[tid].depends,
1192 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001193
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001194class RunQueueWorker():
1195 def __init__(self, process, pipe):
1196 self.process = process
1197 self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001198
1199class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001200 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001201
1202 self.cooker = cooker
1203 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001204 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001205
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001206 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1207 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001208 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001209
1210 self.state = runQueuePrepare
1211
1212 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001213 # Invoked at regular time intervals via the bitbake heartbeat event
1214 # while the build is running. We generate a unique name for the handler
1215 # here, in case there is ever more than one RunQueue instance,
Brad Bishop96ff1982019-08-19 13:50:42 -04001216 # start the handler when reaching runQueueSceneInit, and stop it when
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001217 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001219 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1220 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001221 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001222 self.worker = {}
1223 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001224
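    # Spawn a bitbake-worker subprocess (wrapped in FAKEROOTCMD when fakeroot
    # is requested) and stream the cooker configuration and per-multiconfig
    # worker data to it over stdin. The "magic" token marks profile/fakeroot mode.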
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001225 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001226 logger.debug(1, "Starting bitbake-worker")
1227 magic = "decafbad"
1228 if self.cooker.configuration.profile:
1229 magic = "decafbadbad"
1230 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001231 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001232 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001233 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001234 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001235 env = os.environ.copy()
1236 for key, value in (var.split('=') for var in fakerootenv):
1237 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001238 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001239 else:
1240 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1241 bb.utils.nonblockingfd(worker.stdout)
1242 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1243
1244 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001245 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1246 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1247 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1248 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001249 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001250 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1251 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1252 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1253 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1254 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001255 "buildname" : self.cfgData.getVar("BUILDNAME"),
1256 "date" : self.cfgData.getVar("DATE"),
1257 "time" : self.cfgData.getVar("TIME"),
Brad Bishop08902b02019-08-20 09:16:51 -04001258 "hashservport" : self.cooker.hashservport,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001259 }
1260
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001261 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001262 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001263 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001264 worker.stdin.flush()
1265
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001266 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001267
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001268 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001269 if not worker:
1270 return
1271 logger.debug(1, "Teardown for bitbake-worker")
1272 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001273 worker.process.stdin.write(b"<quit></quit>")
1274 worker.process.stdin.flush()
1275 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001276 except IOError:
1277 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001278 while worker.process.returncode is None:
1279 worker.pipe.read()
1280 worker.process.poll()
1281 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001282 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001283 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001284
1285 def start_worker(self):
1286 if self.worker:
1287 self.teardown_workers()
1288 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001289 for mc in self.rqdata.dataCaches:
1290 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001291
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001292 def start_fakeworker(self, rqexec, mc):
1293 if not mc in self.fakeworker:
1294 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001295
1296 def teardown_workers(self):
1297 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001298 for mc in self.worker:
1299 self._teardown_worker(self.worker[mc])
1300 self.worker = {}
1301 for mc in self.fakeworker:
1302 self._teardown_worker(self.fakeworker[mc])
1303 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001304
1305 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001306 for mc in self.worker:
1307 self.worker[mc].pipe.read()
1308 for mc in self.fakeworker:
1309 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001310
1311 def active_fds(self):
1312 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001313 for mc in self.worker:
1314 fds.append(self.worker[mc].pipe.input)
1315 for mc in self.fakeworker:
1316 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001317 return fds
1318
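    # Decide whether the stamp for a task is current. A task is treated as out
    # of date if its stamp is missing, it is flagged 'nostamp', or a dependency's
    # stamp (allowing for _setscene variants) is newer than its own.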
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001319 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001320 def get_timestamp(f):
1321 try:
1322 if not os.access(f, os.F_OK):
1323 return None
1324 return os.stat(f)[stat.ST_MTIME]
1325 except:
1326 return None
1327
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001328 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1329 if taskname is None:
1330 taskname = tn
1331
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001332 if self.stamppolicy == "perfile":
1333 fulldeptree = False
1334 else:
1335 fulldeptree = True
1336 stampwhitelist = []
1337 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001338 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001339
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001340 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001341
1342 # If the stamp is missing, it's not current
1343 if not os.access(stampfile, os.F_OK):
1344 logger.debug(2, "Stampfile %s not available", stampfile)
1345 return False
1346 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001347 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001348 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1349 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1350 return False
1351
1352 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1353 return True
1354
1355 if cache is None:
1356 cache = {}
1357
1358 iscurrent = True
1359 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001360 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001361 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001362 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1363 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1364 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001365 t2 = get_timestamp(stampfile2)
1366 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001367 if t3 and not t2:
1368 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001369 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001370 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001371 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1372 if not t2:
1373 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1374 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001375 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001376 if t1 < t2:
1377 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1378 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001379 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001380 if recurse and iscurrent:
1381 if dep in cache:
1382 iscurrent = cache[dep]
1383 if not iscurrent:
1384 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1385 else:
1386 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1387 cache[dep] = iscurrent
1388 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001389 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001390 return iscurrent
1391
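    # Gather hash, hashfn and unihash data for the given tids and hand it to the
    # configured BB_HASHCHECK_FUNCTION, which is expected to return the set of
    # tids whose setscene (sstate) results appear to be available for reuse.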
Brad Bishop08902b02019-08-20 09:16:51 -04001392 def validate_hashes(self, tocheck, data, currentcount=None, siginfo=False):
Brad Bishop96ff1982019-08-19 13:50:42 -04001393 valid = set()
1394 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001395 sq_data = {}
1396 sq_data['hash'] = {}
1397 sq_data['hashfn'] = {}
1398 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001399 for tid in tocheck:
1400 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001401 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1402 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1403 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001404
Brad Bishop08902b02019-08-20 09:16:51 -04001405 valid = self.validate_hash(sq_data, data, siginfo, currentcount)
Brad Bishop96ff1982019-08-19 13:50:42 -04001406
1407 return valid
1408
Brad Bishop08902b02019-08-20 09:16:51 -04001409 def validate_hash(self, sq_data, d, siginfo, currentcount):
1410 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
Brad Bishop19323692019-04-05 15:28:33 -04001411
Brad Bishop08902b02019-08-20 09:16:51 -04001412 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
1413 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
Brad Bishop19323692019-04-05 15:28:33 -04001414
Brad Bishop19323692019-04-05 15:28:33 -04001415 return bb.utils.better_eval(call, locs)
1416
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001417 def _execute_runqueue(self):
1418 """
1419 Run the tasks in a queue prepared by rqdata.prepare()
1420 Upon failure, optionally try to recover the build using any alternate providers
1421 (if the abort on failure configuration option isn't set)
1422 """
1423
1424 retval = True
1425
1426 if self.state is runQueuePrepare:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001427 # NOTE: if you add, remove or significantly refactor the stages of this
1428 # process then you should recalculate the weightings here. This is quite
1429 # easy to do - just change the next line temporarily to pass debug=True as
1430 # the last parameter and you'll get a printout of the weightings as well
1431 # as a map to the lines where next_stage() was called. Of course this isn't
1432 # critical, but it helps to keep the progress reporting accurate.
1433 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1434 "Initialising tasks",
1435 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001436 if self.rqdata.prepare() == 0:
1437 self.state = runQueueComplete
1438 else:
1439 self.state = runQueueSceneInit
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001440
1441 if self.state is runQueueSceneInit:
Brad Bishop96ff1982019-08-19 13:50:42 -04001442 self.rqdata.init_progress_reporter.next_stage()
1443
1444 # we are ready to run, emit dependency info to any UI or class which
1445 # needs it
1446 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1447 self.rqdata.init_progress_reporter.next_stage()
1448 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
1449
Brad Bishope2d5b612018-11-23 10:55:50 +13001450 if not self.dm_event_handler_registered:
1451 res = bb.event.register(self.dm_event_handler_name,
Brad Bishop96ff1982019-08-19 13:50:42 -04001452 lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
Brad Bishope2d5b612018-11-23 10:55:50 +13001453 ('bb.event.HeartbeatEvent',))
1454 self.dm_event_handler_registered = True
1455
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001456 dump = self.cooker.configuration.dump_signatures
1457 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001458 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001459 if 'printdiff' in dump:
1460 invalidtasks = self.print_diffscenetasks()
1461 self.dump_signatures(dump)
1462 if 'printdiff' in dump:
1463 self.write_diffscenetasks(invalidtasks)
1464 self.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001465
Brad Bishop96ff1982019-08-19 13:50:42 -04001466 if self.state is runQueueSceneInit:
1467 self.rqdata.init_progress_reporter.next_stage()
1468 self.start_worker()
1469 self.rqdata.init_progress_reporter.next_stage()
1470 self.rqexe = RunQueueExecute(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001471
Brad Bishop96ff1982019-08-19 13:50:42 -04001472 # If we don't have any setscene functions, skip execution
1473 if len(self.rqdata.runq_setscene_tids) == 0:
1474 logger.info('No setscene tasks')
1475 for tid in self.rqdata.runtaskentries:
1476 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1477 self.rqexe.setbuildable(tid)
1478 self.rqexe.tasks_notcovered.add(tid)
1479 self.rqexe.sqdone = True
1480 logger.info('Executing Tasks')
1481 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001482
1483 if self.state is runQueueRunning:
1484 retval = self.rqexe.execute()
1485
1486 if self.state is runQueueCleanUp:
1487 retval = self.rqexe.finish()
1488
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001489 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1490
1491 if build_done and self.dm_event_handler_registered:
1492 bb.event.remove(self.dm_event_handler_name, None)
1493 self.dm_event_handler_registered = False
1494
1495 if build_done and self.rqexe:
Brad Bishop08902b02019-08-20 09:16:51 -04001496 bb.parse.siggen.save_unitaskhashes()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001497 self.teardown_workers()
Brad Bishop96ff1982019-08-19 13:50:42 -04001498 if self.rqexe:
1499 if self.rqexe.stats.failed:
1500 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1501 else:
1502 # Let's avoid the word "failed" if nothing actually did
1503 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001504
1505 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001506 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001507
1508 if self.state is runQueueComplete:
1509 # All done
1510 return False
1511
1512 # Loop
1513 return retval
1514
1515 def execute_runqueue(self):
1516 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1517 try:
1518 return self._execute_runqueue()
1519 except bb.runqueue.TaskFailure:
1520 raise
1521 except SystemExit:
1522 raise
1523 except bb.BBHandledException:
1524 try:
1525 self.teardown_workers()
1526 except:
1527 pass
1528 self.state = runQueueComplete
1529 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001530 except Exception as err:
1531 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001532 try:
1533 self.teardown_workers()
1534 except:
1535 pass
1536 self.state = runQueueComplete
1537 raise
1538
1539 def finish_runqueue(self, now = False):
1540 if not self.rqexe:
1541 self.state = runQueueComplete
1542 return
1543
1544 if now:
1545 self.rqexe.finish_now()
1546 else:
1547 self.rqexe.finish()
1548
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001549 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001550 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001551 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1552 siggen = bb.parse.siggen
1553 dataCaches = self.rqdata.dataCaches
1554 siggen.dump_sigfn(fn, dataCaches, options)
1555
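    # Reparse each recipe in its own process (capped at BB_NUMBER_PARSE_THREADS)
    # and have the signature generator write out signature data for every task.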
1556 def dump_signatures(self, options):
1557 fns = set()
1558 bb.note("Reparsing files to collect dependency data")
1559
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001560 for tid in self.rqdata.runtaskentries:
1561 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001562 fns.add(fn)
1563
1564 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1565 # We cannot use the real multiprocessing.Pool easily due to some local data
1566 # that can't be pickled. This is a cheap multi-process solution.
1567 launched = []
1568 while fns:
1569 if len(launched) < max_process:
1570 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1571 p.start()
1572 launched.append(p)
1573 for q in launched:
1574 # The finished processes are joined when calling is_alive()
1575 if not q.is_alive():
1576 launched.remove(q)
1577 for p in launched:
1578 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001579
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001580 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001581
1582 return
1583
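    # Work out which tasks have no matching cached/setscene result and report
    # the earliest points in the dependency chain where this build diverges
    # from anything previously cached.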
1584 def print_diffscenetasks(self):
1585
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001586 noexec = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001587 tocheck = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001588
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001589 for tid in self.rqdata.runtaskentries:
1590 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1591 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001592
1593 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001594 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001595 continue
1596
Brad Bishop96ff1982019-08-19 13:50:42 -04001597 tocheck.add(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001598
Brad Bishop96ff1982019-08-19 13:50:42 -04001599 valid_new = self.validate_hashes(tocheck, self.cooker.data, None, True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001600
1601 # Tasks which are both setscene and noexec never care about dependencies
1602 # We therefore find tasks which are setscene and noexec and mark their
1603 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001604 for tid in noexec:
1605 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001606 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001607 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001608 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001609 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1610 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001611 continue
1612 hasnoexecparents = False
1613 break
1614 if hasnoexecparents:
1615 valid_new.add(dep)
1616
1617 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001618 for tid in self.rqdata.runtaskentries:
1619 if tid not in valid_new and tid not in noexec:
1620 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001621
1622 found = set()
1623 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001624 for tid in invalidtasks:
1625 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001626 while toprocess:
1627 next = set()
1628 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001629 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001630 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001631 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001632 if dep not in processed:
1633 processed.add(dep)
1634 next.add(dep)
1635 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001636 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001637 toprocess = set()
1638
1639 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001640 for tid in invalidtasks.difference(found):
1641 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001642
1643 if tasklist:
1644 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1645
1646 return invalidtasks.difference(found)
1647
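    # For each invalid task, locate the closest matching sigdata file on disk
    # and print a signature comparison explaining why the cached result could
    # not be reused.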
1648 def write_diffscenetasks(self, invalidtasks):
1649
1650 # Define recursion callback
1651 def recursecb(key, hash1, hash2):
1652 hashes = [hash1, hash2]
1653 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1654
1655 recout = []
1656 if len(hashfiles) == 2:
1657 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
Brad Bishopc342db32019-05-15 21:57:59 -04001658 recout.extend(list(' ' + l for l in out2))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001659 else:
1660 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1661
1662 return recout
1663
1664
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001665 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001666 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1667 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001668 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001669 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1670 match = None
1671 for m in matches:
1672 if h in m:
1673 match = m
1674 if match is None:
1675 bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001676 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001677 if matches:
1678 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
Brad Bishop19323692019-04-05 15:28:33 -04001679 prevh = __find_sha256__.search(latestmatch).group(0)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001680 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1681 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1682
Brad Bishop96ff1982019-08-19 13:50:42 -04001683
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001684class RunQueueExecute:
1685
1686 def __init__(self, rq):
1687 self.rq = rq
1688 self.cooker = rq.cooker
1689 self.cfgData = rq.cfgData
1690 self.rqdata = rq.rqdata
1691
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001692 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1693 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001694
Brad Bishop96ff1982019-08-19 13:50:42 -04001695 self.sq_buildable = set()
1696 self.sq_running = set()
1697 self.sq_live = set()
1698
Brad Bishop08902b02019-08-20 09:16:51 -04001699 self.updated_taskhash_queue = []
1700 self.pending_migrations = set()
1701
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001702 self.runq_buildable = set()
1703 self.runq_running = set()
1704 self.runq_complete = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001705
1706 self.build_stamps = {}
1707 self.build_stamps2 = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001708 self.failed_tids = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001709 self.sq_deferred = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001710
1711 self.stampcache = {}
1712
Brad Bishop08902b02019-08-20 09:16:51 -04001713 self.holdoff_tasks = set()
Brad Bishopc68388fc2019-08-26 01:33:31 -04001714 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04001715 self.sqdone = False
1716
1717 self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
1718 self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
1719
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001720 for mc in rq.worker:
1721 rq.worker[mc].pipe.setrunqueueexec(self)
1722 for mc in rq.fakeworker:
1723 rq.fakeworker[mc].pipe.setrunqueueexec(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001724
1725 if self.number_tasks <= 0:
1726 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1727
Brad Bishop96ff1982019-08-19 13:50:42 -04001728 # List of setscene tasks which we've covered
1729 self.scenequeue_covered = set()
1730 # List of tasks which are covered (including setscene ones)
1731 self.tasks_covered = set()
1732 self.tasks_scenequeue_done = set()
1733 self.scenequeue_notcovered = set()
1734 self.tasks_notcovered = set()
1735 self.scenequeue_notneeded = set()
1736
Brad Bishop08902b02019-08-20 09:16:51 -04001737 # We can't skip specified target tasks which aren't setscene tasks
1738 self.cantskip = set(self.rqdata.target_tids)
1739 self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
1740 self.cantskip.intersection_update(self.rqdata.runtaskentries)
Brad Bishop96ff1982019-08-19 13:50:42 -04001741
1742 schedulers = self.get_schedulers()
1743 for scheduler in schedulers:
1744 if self.scheduler == scheduler.name:
1745 self.sched = scheduler(self, self.rqdata)
1746 logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
1747 break
1748 else:
1749 bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
1750 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1751
Brad Bishop08902b02019-08-20 09:16:51 -04001752 #if len(self.rqdata.runq_setscene_tids) > 0:
1753 self.sqdata = SQData()
1754 build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001755
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001756 def runqueue_process_waitpid(self, task, status):
1757
1758 # self.build_stamps[pid] may not exist when using a shared work directory.
1759 if task in self.build_stamps:
1760 self.build_stamps2.remove(self.build_stamps[task])
1761 del self.build_stamps[task]
1762
Brad Bishop96ff1982019-08-19 13:50:42 -04001763 if task in self.sq_live:
1764 if status != 0:
1765 self.sq_task_fail(task, status)
1766 else:
1767 self.sq_task_complete(task)
1768 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001769 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001770 if status != 0:
1771 self.task_fail(task, status)
1772 else:
1773 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001774 return True
1775
1776 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001777 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001778 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001779 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1780 self.rq.worker[mc].process.stdin.flush()
1781 except IOError:
1782 # worker must have died?
1783 pass
1784 for mc in self.rq.fakeworker:
1785 try:
1786 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1787 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001788 except IOError:
1789 # worker must have died?
1790 pass
1791
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001792 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001793 self.rq.state = runQueueFailed
1794 return
1795
1796 self.rq.state = runQueueComplete
1797 return
1798
1799 def finish(self):
1800 self.rq.state = runQueueCleanUp
1801
Brad Bishop96ff1982019-08-19 13:50:42 -04001802 active = self.stats.active + self.sq_stats.active
1803 if active > 0:
1804 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001805 self.rq.read_workers()
1806 return self.rq.active_fds()
1807
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001808 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001809 self.rq.state = runQueueFailed
1810 return True
1811
1812 self.rq.state = runQueueComplete
1813 return True
1814
Brad Bishop96ff1982019-08-19 13:50:42 -04001815 # Used by setscene only
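    # The BB_SETSCENE_DEPVALID function is called as <fn>(task, taskdata, notneeded, d);
    # its return value is used by the caller to decide whether the setscene task
    # can be skipped.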
1816 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001817 if not self.rq.depvalidate:
1818 return False
1819
Brad Bishop08902b02019-08-20 09:16:51 -04001820 # Must not edit parent data
1821 taskdeps = set(taskdeps)
1822
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001823 taskdata = {}
1824 taskdeps.add(task)
1825 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001826 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1827 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001828 taskdata[dep] = [pn, taskname, fn]
1829 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001830 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001831 valid = bb.utils.better_eval(call, locs)
1832 return valid
1833
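    # Both real and setscene tasks count towards the BB_NUMBER_THREADS limit.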
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001834 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001835 active = self.stats.active + self.sq_stats.active
1836 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001837 return can_start
1838
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001839 def get_schedulers(self):
1840 schedulers = set(obj for obj in globals().values()
1841 if type(obj) is type and
1842 issubclass(obj, RunQueueScheduler))
1843
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001844 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001845 if user_schedulers:
1846 for sched in user_schedulers.split():
1847 if not "." in sched:
1848 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1849 continue
1850
1851 modname, name = sched.rsplit(".", 1)
1852 try:
1853 module = __import__(modname, fromlist=(name,))
1854 except ImportError as exc:
1855 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1856 raise SystemExit(1)
1857 else:
1858 schedulers.add(getattr(module, name))
1859 return schedulers
1860
1861 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001862 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001863 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001864
1865 def task_completeoutright(self, task):
1866 """
1867 Mark a task as completed
1868 Look at the reverse dependencies and mark any task with
1869 completed dependencies as buildable
1870 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001871 self.runq_complete.add(task)
1872 for revdep in self.rqdata.runtaskentries[task].revdeps:
1873 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001874 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001875 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001876 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001877 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001878 for dep in self.rqdata.runtaskentries[revdep].depends:
1879 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001880 alldeps = False
1881 break
1882 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001883 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001884 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001885
1886 def task_complete(self, task):
1887 self.stats.taskCompleted()
1888 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1889 self.task_completeoutright(task)
1890
1891 def task_fail(self, task, exitcode):
1892 """
1893 Called when a task has failed
1894 Updates the state engine with the failure
1895 """
1896 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001897 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001898 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001899 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001900 self.rq.state = runQueueCleanUp
1901
1902 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001903 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001904 self.setbuildable(task)
1905 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1906 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001907 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001908 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001909
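    # Post-scenequeue sanity checks: every setscene task should have been marked
    # covered or notcovered, and every runqueue task should have been processed
    # by the setscene code. Returns True if any inconsistency was found.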
Brad Bishop08902b02019-08-20 09:16:51 -04001910 def summarise_scenequeue_errors(self):
1911 err = False
1912 if not self.sqdone:
1913 logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
1914 completeevent = sceneQueueComplete(self.sq_stats, self.rq)
1915 bb.event.fire(completeevent, self.cfgData)
1916 if self.sq_deferred:
1917 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
1918 err = True
1919 if self.updated_taskhash_queue:
1920 logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
1921 err = True
1922 if self.holdoff_tasks:
1923 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
1924 err = True
1925
1926 for tid in self.rqdata.runq_setscene_tids:
1927 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
1928 err = True
1929 logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
1930 if tid not in self.sq_buildable:
1931 err = True
1932 logger.error("Setscene Task %s was never marked as buildable" % tid)
1933 if tid not in self.sq_running:
1934 err = True
1935 logger.error("Setscene Task %s was never marked as running" % tid)
1936
1937 for x in self.rqdata.runtaskentries:
1938 if x not in self.tasks_covered and x not in self.tasks_notcovered:
1939 logger.error("Task %s was never moved from the setscene queue" % x)
1940 err = True
1941 if x not in self.tasks_scenequeue_done:
1942 logger.error("Task %s was never processed by the setscene code" % x)
1943 err = True
1944 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
1945 logger.error("Task %s was never marked as buildable by the setscene code" % x)
1946 err = True
1947 return err
1948
1949
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001950 def execute(self):
1951 """
Brad Bishop96ff1982019-08-19 13:50:42 -04001952 Run the tasks in a queue prepared by prepare_runqueue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001953 """
1954
1955 self.rq.read_workers()
Brad Bishop08902b02019-08-20 09:16:51 -04001956 self.process_possible_migrations()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001957
Brad Bishop96ff1982019-08-19 13:50:42 -04001958 task = None
1959 if not self.sqdone and self.can_start_task():
1960 # Find the next setscene to run
Brad Bishop08902b02019-08-20 09:16:51 -04001961 for nexttask in sorted(self.rqdata.runq_setscene_tids):
Brad Bishop96ff1982019-08-19 13:50:42 -04001962 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
1963 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
1964 if nexttask not in self.rqdata.target_tids:
1965 logger.debug(2, "Skipping setscene for task %s" % nexttask)
1966 self.sq_task_skip(nexttask)
1967 self.scenequeue_notneeded.add(nexttask)
1968 if nexttask in self.sq_deferred:
1969 del self.sq_deferred[nexttask]
1970 return True
Brad Bishop08902b02019-08-20 09:16:51 -04001971 # If covered tasks are running, need to wait for them to complete
1972 for t in self.sqdata.sq_covered_tasks[nexttask]:
1973 if t in self.runq_running and t not in self.runq_complete:
1974 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04001975 if nexttask in self.sq_deferred:
1976 if self.sq_deferred[nexttask] not in self.runq_complete:
1977 continue
1978 logger.debug(1, "Task %s no longer deferred" % nexttask)
1979 del self.sq_deferred[nexttask]
1980 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, None, False)
1981 if not valid:
1982 logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
1983 self.sq_task_failoutright(nexttask)
1984 return True
1985 else:
1986 self.sqdata.outrightfail.remove(nexttask)
1987 if nexttask in self.sqdata.outrightfail:
1988 logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
1989 self.sq_task_failoutright(nexttask)
1990 return True
1991 if nexttask in self.sqdata.unskippable:
1992 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
1993 task = nexttask
1994 break
1995 if task is not None:
1996 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
1997 taskname = taskname + "_setscene"
1998 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
1999 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
2000 self.sq_task_failoutright(task)
2001 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002002
Brad Bishop96ff1982019-08-19 13:50:42 -04002003 if self.cooker.configuration.force:
2004 if task in self.rqdata.target_tids:
2005 self.sq_task_failoutright(task)
2006 return True
2007
2008 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2009 logger.debug(2, 'Setscene stamp current for task %s, so skip it and its dependencies', task)
2010 self.sq_task_skip(task)
2011 return True
2012
2013 if self.cooker.configuration.skipsetscene:
2014 logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
2015 self.sq_task_failoutright(task)
2016 return True
2017
2018 startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
2019 bb.event.fire(startevent, self.cfgData)
2020
2021 taskdepdata = self.sq_build_taskdepdata(task)
2022
2023 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2024 taskhash = self.rqdata.get_task_hash(task)
2025 unihash = self.rqdata.get_task_unihash(task)
2026 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
2027 if not mc in self.rq.fakeworker:
2028 self.rq.start_fakeworker(self, mc)
2029 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2030 self.rq.fakeworker[mc].process.stdin.flush()
2031 else:
2032 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
2033 self.rq.worker[mc].process.stdin.flush()
2034
2035 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2036 self.build_stamps2.append(self.build_stamps[task])
2037 self.sq_running.add(task)
2038 self.sq_live.add(task)
2039 self.sq_stats.taskActive()
2040 if self.can_start_task():
2041 return True
2042
Brad Bishopc68388fc2019-08-26 01:33:31 -04002043 self.update_holdofftasks()
2044
Brad Bishop08902b02019-08-20 09:16:51 -04002045 if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
Brad Bishop96ff1982019-08-19 13:50:42 -04002046 logger.info("Setscene tasks completed")
Brad Bishop96ff1982019-08-19 13:50:42 -04002047
Brad Bishop08902b02019-08-20 09:16:51 -04002048 err = self.summarise_scenequeue_errors()
Brad Bishop96ff1982019-08-19 13:50:42 -04002049 if err:
2050 self.rq.state = runQueueFailed
2051 return True
2052
2053 if self.cooker.configuration.setsceneonly:
2054 self.rq.state = runQueueComplete
2055 return True
2056 self.sqdone = True
2057
2058 if self.stats.total == 0:
2059 # nothing to do
2060 self.rq.state = runQueueComplete
2061 return True
2062
2063 if self.cooker.configuration.setsceneonly:
2064 task = None
2065 else:
2066 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002067 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002068 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002069
Brad Bishop96ff1982019-08-19 13:50:42 -04002070 if self.rqdata.setscenewhitelist is not None:
2071 if self.check_setscenewhitelist(task):
2072 self.task_fail(task, "setscene whitelist")
2073 return True
2074
2075 if task in self.tasks_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002076 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002077 self.task_skip(task, "covered")
2078 return True
2079
2080 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002081 logger.debug(2, "Stamp current task %s", task)
2082
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002083 self.task_skip(task, "existing")
2084 return True
2085
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002086 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002087 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2088 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2089 noexec=True)
2090 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002091 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002092 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002093 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002094 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002095 self.task_complete(task)
2096 return True
2097 else:
2098 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2099 bb.event.fire(startevent, self.cfgData)
2100
2101 taskdepdata = self.build_taskdepdata(task)
2102
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002103 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002104 taskhash = self.rqdata.get_task_hash(task)
2105 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002106 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002107 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002108 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002109 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002110 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002111 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002112 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002113 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002114 return True
Brad Bishop19323692019-04-05 15:28:33 -04002115 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002116 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002117 else:
Brad Bishop19323692019-04-05 15:28:33 -04002118 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002119 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002120
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002121 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2122 self.build_stamps2.append(self.build_stamps[task])
2123 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002124 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002125 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002126 return True
2127
Brad Bishop96ff1982019-08-19 13:50:42 -04002128 if self.stats.active > 0 or self.sq_stats.active > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002129 self.rq.read_workers()
2130 return self.rq.active_fds()
2131
Brad Bishop96ff1982019-08-19 13:50:42 -04002132 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2133 if self.sq_deferred:
2134 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2135 logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s" % tid)
2136 self.sq_task_failoutright(tid)
2137 return True
2138
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002139 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002140 self.rq.state = runQueueFailed
2141 return True
2142
2143 # Sanity Checks
Brad Bishop08902b02019-08-20 09:16:51 -04002144 err = self.summarise_scenequeue_errors()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002145 for task in self.rqdata.runtaskentries:
2146 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002147 logger.error("Task %s never buildable!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002148 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002149 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002150 logger.error("Task %s never ran!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002151 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002152 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002153 logger.error("Task %s never completed!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002154 err = True
2155
2156 if err:
2157 self.rq.state = runQueueFailed
2158 else:
2159 self.rq.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002160
2161 return True
2162
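    # Restrict a dependency set to entries that belong to the same multiconfig
    # as the task being built.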
Brad Bishopc68388fc2019-08-26 01:33:31 -04002163 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002164 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002165 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002166 thismc = mc_from_tid(dep)
2167 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002168 continue
2169 ret.add(dep)
2170 return ret
2171
2172 # We filter out multiconfig dependencies from the taskdepdata we pass to the tasks
2173 # as most code can't handle them
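    # Each taskdepdata entry has the form [pn, taskname, fn, deps, provides,
    # taskhash, unihash], covering the task itself and its transitive
    # (same-multiconfig) dependencies.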
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002174 def build_taskdepdata(self, task):
2175 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002176 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002177 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002178 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002179 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002180 while next:
2181 additional = []
2182 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002183 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2184 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2185 deps = self.rqdata.runtaskentries[revdep].depends
2186 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002187 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002188 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002189 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002190 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002191 for revdep2 in deps:
2192 if revdep2 not in taskdepdata:
2193 additional.append(revdep2)
2194 next = additional
2195
2196 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2197 return taskdepdata
2198
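    # Recompute which tasks are covered/notcovered by the scenequeue and which
    # tasks must be held back until the setscene tasks covering them have been
    # resolved.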
Brad Bishop08902b02019-08-20 09:16:51 -04002199 def update_holdofftasks(self):
Brad Bishopc68388fc2019-08-26 01:33:31 -04002200
2201 if not self.holdoff_need_update:
2202 return
2203
2204 notcovered = set(self.scenequeue_notcovered)
2205 notcovered |= self.cantskip
2206 for tid in self.scenequeue_notcovered:
2207 notcovered |= self.sqdata.sq_covered_tasks[tid]
2208 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2209 notcovered.intersection_update(self.tasks_scenequeue_done)
2210
2211 covered = set(self.scenequeue_covered)
2212 for tid in self.scenequeue_covered:
2213 covered |= self.sqdata.sq_covered_tasks[tid]
2214 covered.difference_update(notcovered)
2215 covered.intersection_update(self.tasks_scenequeue_done)
2216
2217 for tid in notcovered | covered:
2218 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2219 self.setbuildable(tid)
2220 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2221 self.setbuildable(tid)
2222
2223 self.tasks_covered = covered
2224 self.tasks_notcovered = notcovered
2225
Brad Bishop08902b02019-08-20 09:16:51 -04002226 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002227
Brad Bishop08902b02019-08-20 09:16:51 -04002228 for tid in self.rqdata.runq_setscene_tids:
2229 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2230 self.holdoff_tasks.add(tid)
2231
2232 for tid in self.holdoff_tasks.copy():
2233 for dep in self.sqdata.sq_covered_tasks[tid]:
2234 if dep not in self.runq_complete:
2235 self.holdoff_tasks.add(dep)
2236
Brad Bishopc68388fc2019-08-26 01:33:31 -04002237 self.holdoff_need_update = False
2238
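    # Handle unihash changes reported back while the build runs: propagate the
    # new hashes through reverse dependencies, tell the workers, and reset the
    # scenequeue state of any setscene task that becomes valid again so it can
    # be rerun.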
Brad Bishop08902b02019-08-20 09:16:51 -04002239 def process_possible_migrations(self):
2240
2241 changed = set()
2242 for tid, unihash in self.updated_taskhash_queue.copy():
2243 if tid in self.runq_running and tid not in self.runq_complete:
2244 continue
2245
2246 self.updated_taskhash_queue.remove((tid, unihash))
2247
2248 if unihash != self.rqdata.runtaskentries[tid].unihash:
2249 logger.info("Task %s unihash changed to %s" % (tid, unihash))
2250 self.rqdata.runtaskentries[tid].unihash = unihash
2251 bb.parse.siggen.set_unihash(tid, unihash)
2252
2253 # Work out all tasks which depend on this one
2254 total = set()
2255 next = set(self.rqdata.runtaskentries[tid].revdeps)
2256 while next:
2257 current = next.copy()
2258 total = total | next
2259 next = set()
2260 for ntid in current:
2261 next |= self.rqdata.runtaskentries[ntid].revdeps
2262 next.difference_update(total)
2263
2264 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2265 done = set()
2266 next = set(self.rqdata.runtaskentries[tid].revdeps)
2267 while next:
2268 current = next.copy()
2269 next = set()
2270 for tid in current:
2271 if not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2272 continue
2273 procdep = []
2274 for dep in self.rqdata.runtaskentries[tid].depends:
2275 procdep.append(dep)
2276 orighash = self.rqdata.runtaskentries[tid].hash
2277 self.rqdata.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.rqdata.dataCaches[mc_from_tid(tid)])
2278 origuni = self.rqdata.runtaskentries[tid].unihash
2279 self.rqdata.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
2280 logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, self.rqdata.runtaskentries[tid].hash, origuni, self.rqdata.runtaskentries[tid].unihash))
2281 next |= self.rqdata.runtaskentries[tid].revdeps
2282 changed.add(tid)
2283 total.remove(tid)
2284 next.intersection_update(total)
2285
2286 if changed:
2287 for mc in self.rq.worker:
2288 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2289 for mc in self.rq.fakeworker:
2290 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2291
2292 logger.debug(1, "Tasks changed:\n%s" % pprint.pformat(changed))
2293
2294 for tid in changed:
2295 if tid not in self.rqdata.runq_setscene_tids:
2296 continue
2297 valid = self.rq.validate_hashes(set([tid]), self.cooker.data, None, False)
2298 if not valid:
2299 continue
2300 if tid in self.runq_running:
2301 continue
2302 if tid not in self.pending_migrations:
2303 self.pending_migrations.add(tid)
2304
2305 for tid in self.pending_migrations.copy():
2306 valid = True
2307 # Check no tasks this covers are running
2308 for dep in self.sqdata.sq_covered_tasks[tid]:
2309 if dep in self.runq_running and dep not in self.runq_complete:
2310 logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
2311 valid = False
2312 break
2313 if not valid:
2314 continue
2315
2316 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002317 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002318
2319 if tid in self.tasks_scenequeue_done:
2320 self.tasks_scenequeue_done.remove(tid)
2321 for dep in self.sqdata.sq_covered_tasks[tid]:
2322 if dep not in self.runq_complete:
2323 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2324 self.tasks_scenequeue_done.remove(dep)
2325
2326 if tid in self.sq_buildable:
2327 self.sq_buildable.remove(tid)
2328 if tid in self.sq_running:
2329 self.sq_running.remove(tid)
2330 if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2331 if tid not in self.sq_buildable:
2332 self.sq_buildable.add(tid)
2333 if len(self.sqdata.sq_revdeps[tid]) == 0:
2334 self.sq_buildable.add(tid)
2335
2336 if tid in self.sqdata.outrightfail:
2337 self.sqdata.outrightfail.remove(tid)
2338 if tid in self.scenequeue_notcovered:
2339 self.scenequeue_notcovered.remove(tid)
2340 if tid in self.scenequeue_covered:
2341 self.scenequeue_covered.remove(tid)
2342 if tid in self.scenequeue_notneeded:
2343 self.scenequeue_notneeded.remove(tid)
2344
2345 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2346 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2347
2348 if tid in self.stampcache:
2349 del self.stampcache[tid]
2350
2351 if tid in self.build_stamps:
2352 del self.build_stamps[tid]
2353
2354 logger.info("Setscene task %s now valid and being rerun" % tid)
2355 self.sqdone = False
2356
2357 if changed:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002358 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002359
Brad Bishop96ff1982019-08-19 13:50:42 -04002360 def scenequeue_updatecounters(self, task, fail=False):
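        # Called when a setscene task finishes (fail=True on failure): make dependent
        # setscene tasks buildable once all of their revdeps are covered or notcovered,
        # outright-fail any tasks which hard-depend on a failed task, and record which
        # non-setscene tasks in the dependency chain are now effectively done.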
Brad Bishop08902b02019-08-20 09:16:51 -04002361
2362 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002363 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002364 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002365 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002366 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002367 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2368 if dep not in self.sq_buildable:
2369 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002370
Brad Bishop96ff1982019-08-19 13:50:42 -04002371 next = set([task])
2372 while next:
2373 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002374 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002375 self.tasks_scenequeue_done.add(t)
2376 # Look down the dependency chain for non-setscene things which this task depends on
2377 # and mark as 'done'
2378 for dep in self.rqdata.runtaskentries[t].depends:
2379 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2380 continue
2381 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2382 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002383 next = new
2384
Brad Bishopc68388fc2019-08-26 01:33:31 -04002385 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002386
2387 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002388 """
2389 Mark a task as completed
2390 Look at the reverse dependencies and mark any task with
2391 completed dependencies as buildable
2392 """
2393
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002394 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002395 self.scenequeue_covered.add(task)
2396 self.scenequeue_updatecounters(task)
2397
Brad Bishop96ff1982019-08-19 13:50:42 -04002398 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002399 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002400 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002401 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2402 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002403 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2404 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2405 self.rq.state = runQueueCleanUp
2406
Brad Bishop96ff1982019-08-19 13:50:42 -04002407 def sq_task_complete(self, task):
2408 self.sq_stats.taskCompleted()
2409 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2410 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002411
Brad Bishop96ff1982019-08-19 13:50:42 -04002412 def sq_task_fail(self, task, result):
2413 self.sq_stats.taskFailed()
2414 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002415 self.scenequeue_notcovered.add(task)
2416 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002417 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002418
Brad Bishop96ff1982019-08-19 13:50:42 -04002419 def sq_task_failoutright(self, task):
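        # This setscene task cannot be used, so mark it notcovered; the real task
        # will be run instead.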
2420 self.sq_running.add(task)
2421 self.sq_buildable.add(task)
2422 self.sq_stats.taskSkipped()
2423 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002424 self.scenequeue_notcovered.add(task)
2425 self.scenequeue_updatecounters(task, True)
2426
Brad Bishop96ff1982019-08-19 13:50:42 -04002427 def sq_task_skip(self, task):
2428 self.sq_running.add(task)
2429 self.sq_buildable.add(task)
2430 self.sq_task_completeoutright(task)
2431 self.sq_stats.taskSkipped()
2432 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002433
Brad Bishop96ff1982019-08-19 13:50:42 -04002434 def sq_build_taskdepdata(self, task):
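        # Build the taskdepdata structure for a setscene task: walk its inter-setscene
        # dependencies and record [pn, taskname, fn, deps, provides, taskhash, unihash]
        # for each entry reached.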
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002435 def getsetscenedeps(tid):
2436 deps = set()
2437 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2438 realtid = tid + "_setscene"
2439 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2440 for (depname, idependtask) in idepends:
2441 if depname not in self.rqdata.taskData[mc].build_targets:
2442 continue
2443
2444 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2445 if depfn is None:
2446 continue
2447 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2448 deps.add(deptid)
2449 return deps
2450
2451 taskdepdata = {}
2452 next = getsetscenedeps(task)
2453 next.add(task)
2454 while next:
2455 additional = []
2456 for revdep in next:
2457 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2458 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2459 deps = getsetscenedeps(revdep)
2460 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2461 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002462 unihash = self.rqdata.runtaskentries[revdep].unihash
2463 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002464 for revdep2 in deps:
2465 if revdep2 not in taskdepdata:
2466 additional.append(revdep2)
2467 next = additional
2468
2469 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2470 return taskdepdata
2471
Brad Bishop96ff1982019-08-19 13:50:42 -04002472 def check_setscenewhitelist(self, tid):
2473 # Check task that is going to run against the whitelist
2474 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2475 # Ignore covered tasks
2476 if tid in self.tasks_covered:
2477 return False
2478 # Ignore stamped tasks
2479 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2480 return False
2481 # Ignore noexec tasks
2482 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2483 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2484 return False
2485
2486 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2487 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2488 if tid in self.rqdata.runq_setscene_tids:
2489 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2490 else:
2491 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
2492 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2493 return True
2494 return False
2495
2496class SQData(object):
2497 def __init__(self):
2498 # SceneQueue dependencies
2499 self.sq_deps = {}
2500 # SceneQueue reverse dependencies
2501 self.sq_revdeps = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04002502 # Injected inter-setscene task dependencies
2503 self.sq_harddeps = {}
2504 # Cache of stamp files so duplicates can't run in parallel
2505 self.stamps = {}
2506 # Setscene tasks directly depended upon by the build
2507 self.unskippable = set()
2508 # List of setscene tasks which aren't present
2509 self.outrightfail = set()
2510 # A list of normal tasks a setscene task covers
2511 self.sq_covered_tasks = {}
2512
2513def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
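    # Populate sqdata (sq_deps, sq_revdeps, sq_harddeps, stamps, unskippable,
    # outrightfail, sq_covered_tasks) from the full runqueue dependency data.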
2514
2515 sq_revdeps = {}
2516 sq_revdeps_squash = {}
2517 sq_collated_deps = {}
2518
2519 # We need to construct a dependency graph for the setscene functions. Intermediate
2520 # dependencies between the setscene tasks only complicate the code. This code
2521 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2522 # only containing the setscene functions.
2523
2524 rqdata.init_progress_reporter.next_stage()
2525
2526 # First process the chains up to the first setscene task.
2527 endpoints = {}
2528 for tid in rqdata.runtaskentries:
2529 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2530 sq_revdeps_squash[tid] = set()
2531 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2532 #bb.warn("Added endpoint %s" % (tid))
2533 endpoints[tid] = set()
2534
2535 rqdata.init_progress_reporter.next_stage()
2536
2537 # Secondly process the chains between setscene tasks.
2538 for tid in rqdata.runq_setscene_tids:
2539 sq_collated_deps[tid] = set()
2540 #bb.warn("Added endpoint 2 %s" % (tid))
2541 for dep in rqdata.runtaskentries[tid].depends:
2542 if tid in sq_revdeps[dep]:
2543 sq_revdeps[dep].remove(tid)
2544 if dep not in endpoints:
2545 endpoints[dep] = set()
2546 #bb.warn(" Added endpoint 3 %s" % (dep))
2547 endpoints[dep].add(tid)
2548
2549 rqdata.init_progress_reporter.next_stage()
2550
2551 def process_endpoints(endpoints):
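        # Walk back from each endpoint towards its dependencies: accumulate the
        # setscene tasks which cover each intermediate (non-setscene) task in
        # sq_collated_deps, squash the chains into sq_revdeps_squash and create
        # new endpoints as dependencies lose their last remaining revdep.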
2552 newendpoints = {}
2553 for point, task in endpoints.items():
2554 tasks = set()
2555 if task:
2556 tasks |= task
2557 if sq_revdeps_squash[point]:
2558 tasks |= sq_revdeps_squash[point]
2559 if point not in rqdata.runq_setscene_tids:
2560 for t in tasks:
2561 sq_collated_deps[t].add(point)
2562 sq_revdeps_squash[point] = set()
2563 if point in rqdata.runq_setscene_tids:
2564 sq_revdeps_squash[point] = tasks
2565 tasks = set()
2566 continue
2567 for dep in rqdata.runtaskentries[point].depends:
2568 if point in sq_revdeps[dep]:
2569 sq_revdeps[dep].remove(point)
2570 if tasks:
2571 sq_revdeps_squash[dep] |= tasks
2572 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2573 newendpoints[dep] = task
2574 if len(newendpoints) != 0:
2575 process_endpoints(newendpoints)
2576
2577 process_endpoints(endpoints)
2578
2579 rqdata.init_progress_reporter.next_stage()
2580
Brad Bishop08902b02019-08-20 09:16:51 -04002581 # Build a list of tasks which are "unskippable"
2582    # These are direct endpoints referenced by the build, up to and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002583 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2584 new = True
2585 for tid in rqdata.runtaskentries:
2586 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2587 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002588 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002589 while new:
2590 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002591 orig = sqdata.unskippable.copy()
2592 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002593 if tid in rqdata.runq_setscene_tids:
2594 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002595 if len(rqdata.runtaskentries[tid].depends) == 0:
2596                # These are tasks which have no setscene tasks in their chain, so they need to be marked as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002597 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002598 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002599 if sqdata.unskippable != orig:
2600 new = True
2601
2602 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002603
2604 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2605
2606 # Sanity check all dependencies could be changed to setscene task references
2607 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2608 if tid in rqdata.runq_setscene_tids:
2609 pass
2610 elif len(sq_revdeps_squash[tid]) != 0:
2611 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2612 else:
2613 del sq_revdeps_squash[tid]
2614 rqdata.init_progress_reporter.update(taskcounter)
2615
2616 rqdata.init_progress_reporter.next_stage()
2617
2618 # Resolve setscene inter-task dependencies
2619 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2620 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2621 for tid in rqdata.runq_setscene_tids:
2622 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2623 realtid = tid + "_setscene"
2624 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2625 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2626 for (depname, idependtask) in idepends:
2627
2628 if depname not in rqdata.taskData[mc].build_targets:
2629 continue
2630
2631 depfn = rqdata.taskData[mc].build_targets[depname][0]
2632 if depfn is None:
2633 continue
2634 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2635 if deptid not in rqdata.runtaskentries:
2636 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2637
2638            if deptid not in sqdata.sq_harddeps:
2639 sqdata.sq_harddeps[deptid] = set()
2640 sqdata.sq_harddeps[deptid].add(tid)
2641
2642 sq_revdeps_squash[tid].add(deptid)
2643 # Have to zero this to avoid circular dependencies
2644 sq_revdeps_squash[deptid] = set()
2645
2646 rqdata.init_progress_reporter.next_stage()
2647
2648 for task in sqdata.sq_harddeps:
2649 for dep in sqdata.sq_harddeps[task]:
2650 sq_revdeps_squash[dep].add(task)
2651
2652 rqdata.init_progress_reporter.next_stage()
2653
2654 #for tid in sq_revdeps_squash:
2655 # data = ""
2656 # for dep in sq_revdeps_squash[tid]:
2657 # data = data + "\n %s" % dep
2658 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2659
2660 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002661 sqdata.sq_covered_tasks = sq_collated_deps
2662
2663 # Build reverse version of revdeps to populate deps structure
2664 for tid in sqdata.sq_revdeps:
2665 sqdata.sq_deps[tid] = set()
2666 for tid in sqdata.sq_revdeps:
2667 for dep in sqdata.sq_revdeps[tid]:
2668 sqdata.sq_deps[dep].add(tid)
2669
2670 rqdata.init_progress_reporter.next_stage()
2671
2672 multiconfigs = set()
2673 for tid in sqdata.sq_revdeps:
2674 multiconfigs.add(mc_from_tid(tid))
2675 if len(sqdata.sq_revdeps[tid]) == 0:
2676 sqrq.sq_buildable.add(tid)
2677
2678 rqdata.init_progress_reporter.finish()
2679
2680 if rq.hashvalidate:
2681 noexec = []
2682 stamppresent = []
2683 tocheck = set()
2684
Brad Bishop08902b02019-08-20 09:16:51 -04002685 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002686 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2687
2688 taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
2689
2690 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2691 noexec.append(tid)
2692 sqrq.sq_task_skip(tid)
2693 bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
2694 continue
2695
2696 if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
2697 logger.debug(2, 'Setscene stamp current for task %s', tid)
2698 stamppresent.append(tid)
2699 sqrq.sq_task_skip(tid)
2700 continue
2701
2702 if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
2703 logger.debug(2, 'Normal stamp current for task %s', tid)
2704 stamppresent.append(tid)
2705 sqrq.sq_task_skip(tid)
2706 continue
2707
2708 tocheck.add(tid)
2709
2710 valid = rq.validate_hashes(tocheck, cooker.data, len(stamppresent), False)
2711
2712 valid_new = stamppresent
2713 for v in valid:
2714 valid_new.append(v)
2715
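        # Potentially equivalent setscene tasks from different multiconfigs (same PN,
        # taskname and hash) only need to run once; later duplicates are deferred
        # behind the first tid seen for that hash index.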
2716 hashes = {}
2717 for mc in sorted(multiconfigs):
Brad Bishop08902b02019-08-20 09:16:51 -04002718 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002719 if mc_from_tid(tid) != mc:
2720 continue
2721 if tid not in valid_new and tid not in noexec and tid not in sqrq.scenequeue_notcovered:
2722 sqdata.outrightfail.add(tid)
2723
2724 h = pending_hash_index(tid, rqdata)
2725 if h not in hashes:
2726 hashes[h] = tid
2727 else:
2728 sqrq.sq_deferred[tid] = hashes[h]
2729 bb.warn("Deferring %s after %s" % (tid, hashes[h]))
2730
2731
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002732class TaskFailure(Exception):
2733 """
2734 Exception raised when a task in a runqueue fails
2735 """
2736 def __init__(self, x):
2737 self.args = x
2738
2739
2740class runQueueExitWait(bb.event.Event):
2741 """
2742 Event when waiting for task processes to exit
2743 """
2744
2745 def __init__(self, remain):
2746 self.remain = remain
2747 self.message = "Waiting for %s active tasks to finish" % remain
2748 bb.event.Event.__init__(self)
2749
2750class runQueueEvent(bb.event.Event):
2751 """
2752 Base runQueue event class
2753 """
2754 def __init__(self, task, stats, rq):
2755 self.taskid = task
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002756 self.taskstring = task
2757 self.taskname = taskname_from_tid(task)
2758 self.taskfile = fn_from_tid(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002759 self.taskhash = rq.rqdata.get_task_hash(task)
2760 self.stats = stats.copy()
2761 bb.event.Event.__init__(self)
2762
2763class sceneQueueEvent(runQueueEvent):
2764 """
2765 Base sceneQueue event class
2766 """
2767 def __init__(self, task, stats, rq, noexec=False):
2768 runQueueEvent.__init__(self, task, stats, rq)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002769 self.taskstring = task + "_setscene"
2770 self.taskname = taskname_from_tid(task) + "_setscene"
2771 self.taskfile = fn_from_tid(task)
2772 self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002773
2774class runQueueTaskStarted(runQueueEvent):
2775 """
2776 Event notifying a task was started
2777 """
2778 def __init__(self, task, stats, rq, noexec=False):
2779 runQueueEvent.__init__(self, task, stats, rq)
2780 self.noexec = noexec
2781
2782class sceneQueueTaskStarted(sceneQueueEvent):
2783 """
2784 Event notifying a setscene task was started
2785 """
2786 def __init__(self, task, stats, rq, noexec=False):
2787 sceneQueueEvent.__init__(self, task, stats, rq)
2788 self.noexec = noexec
2789
2790class runQueueTaskFailed(runQueueEvent):
2791 """
2792 Event notifying a task failed
2793 """
2794 def __init__(self, task, stats, exitcode, rq):
2795 runQueueEvent.__init__(self, task, stats, rq)
2796 self.exitcode = exitcode
2797
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002798 def __str__(self):
2799 return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2800
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002801class sceneQueueTaskFailed(sceneQueueEvent):
2802 """
2803 Event notifying a setscene task failed
2804 """
2805 def __init__(self, task, stats, exitcode, rq):
2806 sceneQueueEvent.__init__(self, task, stats, rq)
2807 self.exitcode = exitcode
2808
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002809 def __str__(self):
2810 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2811
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002812class sceneQueueComplete(sceneQueueEvent):
2813 """
2814 Event when all the sceneQueue tasks are complete
2815 """
2816 def __init__(self, stats, rq):
2817 self.stats = stats.copy()
2818 bb.event.Event.__init__(self)
2819
2820class runQueueTaskCompleted(runQueueEvent):
2821 """
2822 Event notifying a task completed
2823 """
2824
2825class sceneQueueTaskCompleted(sceneQueueEvent):
2826 """
2827 Event notifying a setscene task completed
2828 """
2829
2830class runQueueTaskSkipped(runQueueEvent):
2831 """
2832 Event notifying a task was skipped
2833 """
2834 def __init__(self, task, stats, rq, reason):
2835 runQueueEvent.__init__(self, task, stats, rq)
2836 self.reason = reason
2837
Brad Bishop08902b02019-08-20 09:16:51 -04002838class taskUniHashUpdate(bb.event.Event):
2839 """
2840    Event notifying that the unihash for a task has been updated
2841 """
2842 def __init__(self, task, unihash):
2843 self.taskid = task
2844 self.unihash = unihash
2845 bb.event.Event.__init__(self)
2846
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002847class runQueuePipe():
2848 """
2849 Abstraction for a pipe between a worker thread and the server
2850 """
2851 def __init__(self, pipein, pipeout, d, rq, rqexec):
2852 self.input = pipein
2853 if pipeout:
2854 pipeout.close()
2855 bb.utils.nonblockingfd(self.input)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002856 self.queue = b""
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002857 self.d = d
2858 self.rq = rq
2859 self.rqexec = rqexec
2860
2861 def setrunqueueexec(self, rqexec):
2862 self.rqexec = rqexec
2863
2864 def read(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002865 for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
2866 for worker in workers.values():
2867 worker.process.poll()
2868 if worker.process.returncode is not None and not self.rq.teardown:
2869 bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
2870 self.rq.finish_runqueue(True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002871
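        # Worker messages are framed as "<event>" + pickled event + "</event>" or
        # "<exitcode>" + pickled (task, status) + "</exitcode>"; read whatever data is
        # available without blocking and process every complete frame in the buffer.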
2872 start = len(self.queue)
2873 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002874 self.queue = self.queue + (self.input.read(102400) or b"")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002875 except (OSError, IOError) as e:
2876 if e.errno != errno.EAGAIN:
2877 raise
2878 end = len(self.queue)
2879 found = True
2880 while found and len(self.queue):
2881 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002882 index = self.queue.find(b"</event>")
2883 while index != -1 and self.queue.startswith(b"<event>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002884 try:
2885 event = pickle.loads(self.queue[7:index])
2886 except ValueError as e:
2887                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[7:index]))
2888 bb.event.fire_from_worker(event, self.d)
Brad Bishop08902b02019-08-20 09:16:51 -04002889 if isinstance(event, taskUniHashUpdate):
2890 self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002891 found = True
2892 self.queue = self.queue[index+8:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002893 index = self.queue.find(b"</event>")
2894 index = self.queue.find(b"</exitcode>")
2895 while index != -1 and self.queue.startswith(b"<exitcode>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002896 try:
2897 task, status = pickle.loads(self.queue[10:index])
2898 except ValueError as e:
2899                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[10:index]))
2900 self.rqexec.runqueue_process_waitpid(task, status)
2901 found = True
2902 self.queue = self.queue[index+11:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002903 index = self.queue.find(b"</exitcode>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002904 return (end > start)
2905
2906 def close(self):
2907 while self.read():
2908 continue
2909 if len(self.queue) > 0:
2910 print("Warning, worker left partial message: %s" % self.queue)
2911 self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002912
2913def get_setscene_enforce_whitelist(d):
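    # Returns None (no enforcement) unless BB_SETSCENE_ENFORCE is "1". Illustrative
    # example (hypothetical values): with BB_SETSCENE_ENFORCE_WHITELIST = "%:do_fetch foo:do_patch"
    # and "bitbake bar" on the command line, the '%' entry expands to "bar:do_fetch".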
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002914 if d.getVar('BB_SETSCENE_ENFORCE') != '1':
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002915 return None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002916 whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002917 outlist = []
2918 for item in whitelist[:]:
2919 if item.startswith('%:'):
2920 for target in sys.argv[1:]:
2921 if not target.startswith('-'):
2922 outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
2923 else:
2924 outlist.append(item)
2925 return outlist
2926
2927def check_setscene_enforce_whitelist(pn, taskname, whitelist):
2928 import fnmatch
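    # Matching is fnmatch-style, so a whitelist entry such as "gcc*:do_fetch"
    # (illustrative) would match pn "gcc-runtime" with taskname "do_fetch".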
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002929 if whitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002930 item = '%s:%s' % (pn, taskname)
2931 for whitelist_item in whitelist:
2932 if fnmatch.fnmatch(item, whitelist_item):
2933 return True
2934 return False
2935 return True