blob: 7fa074f6792d38eca4fc0568d41d2b1a6c9aba2c [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001"""
2BitBake 'RunQueue' implementation
3
4Handles preparation and execution of a queue of tasks
5"""
6
7# Copyright (C) 2006-2007 Richard Purdie
8#
Brad Bishopc342db32019-05-15 21:57:59 -04009# SPDX-License-Identifier: GPL-2.0-only
Patrick Williamsc124f4f2015-09-15 14:41:29 -050010#
Patrick Williamsc124f4f2015-09-15 14:41:29 -050011
12import copy
13import os
14import sys
15import signal
16import stat
17import fcntl
18import errno
19import logging
20import re
21import bb
22from bb import msg, data, event
23from bb import monitordisk
24import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060025import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050026from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040027import shlex
Brad Bishop96ff1982019-08-19 13:50:42 -040028import pprint
Patrick Williamsc124f4f2015-09-15 14:41:29 -050029
# Parent BitBake logger plus the runqueue-specific child logger used
# throughout this module.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 64-character hex string (i.e. a SHA-256 hash),
# case-insensitively; the lookbehind/lookahead stop it matching inside a
# longer alphanumeric run.
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050034
def fn_from_tid(tid):
    """Return the filename portion of a task id (everything before the last colon)."""
    head, sep, _task = tid.rpartition(":")
    # A tid with no colon is returned unchanged, mirroring rsplit()[0].
    return head if sep else tid
37
def taskname_from_tid(tid):
    """Return the task name portion of a task id (everything after the last colon)."""
    parts = tid.rsplit(":", 1)
    return parts[1]
40
def mc_from_tid(tid):
    """Return the multiconfig name embedded in *tid*, or "" for the default config."""
    if not tid.startswith('mc:'):
        return ""
    return tid.split(':')[1]
45
def split_tid(tid):
    """Return (mc, fn, taskname) for *tid*, discarding the mcfn element."""
    mc, fn, taskname, _mcfn = split_tid_mcfn(tid)
    return (mc, fn, taskname)
49
def split_tid_mcfn(tid):
    """
    Decompose a task id into (mc, fn, taskname, mcfn).

    Multiconfig ids look like "mc:<mcname>:<fn>:<taskname>"; plain ids are
    "<fn>:<taskname>". mcfn is the filename with the multiconfig prefix
    reattached (identical to fn for the default config).
    """
    if tid.startswith('mc:'):
        _, mc, rest = tid.split(':', 2)
        # rpartition yields fn == "" when no further colon exists, matching
        # the ":".join(elems[2:-1]) behaviour of the original layout.
        fn, _sep, taskname = rest.rpartition(':')
        mcfn = "mc:" + mc + ":" + fn
    else:
        parts = tid.rsplit(":", 1)
        mc = ""
        fn = parts[0]
        taskname = parts[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)
65
def build_tid(mc, fn, taskname):
    """Compose a task id from its parts; the inverse of split_tid_mcfn()."""
    tid = fn + ":" + taskname
    if not mc:
        return tid
    return "mc:" + mc + ":" + tid
70
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """
    Return the grouping key "PN:taskname:hash" for *tid*, used to pair up
    potentially matching multiconfig tasks.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].hash
    # FIX: the original concatenated the literal string "taskname" (and no
    # separator) instead of the task's actual name, so tasks were grouped on
    # PN and hash only, contradicting the comment above.
    return pn + ":" + taskname + ":" + h
78
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # All counters start at zero; only the task total is supplied.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow snapshot of the current counters."""
        clone = self.__class__(self.total)
        clone.__dict__.update(self.__dict__)
        return clone

    def taskFailed(self):
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        # A skipped task is counted as (briefly) active as well as skipped.
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        self.active += 1
109
# These values indicate the next step due to be run in the
# runQueue state machine.
# NOTE(review): values 4 and 5 are unused in this range — presumably
# historical states; confirm nothing external depends on the numbering
# before changing it.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # FIX: materialize a real list of tids. The original wrapped the
        # dict keys view in a list literal ([...keys()]), leaving prio_map
        # as a one-element list holding the view, so prio_map.index(tid)
        # (used below and by dump_prio) could never succeed.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                # FIX: self.buildable is a set; the original called
                # .append(), which raises AttributeError on a set.
                self.buildable.add(tid)

        # Lazily built inverse of prio_map (tid -> priority index).
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Candidates: buildable tasks that are not already running, not held
        # off, and inside the covered/notcovered partition.
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.runq_running)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            # Cache the per-taskname "number_threads" flag lookups.
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            # Fast path: a single candidate needs no priority comparison.
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            # Build the tid -> priority index map on first use.
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                # Skip a candidate whose stampfile is already associated with
                # an in-flight task (build_stamps) — presumably to avoid two
                # tasks writing the same stamp; confirm against callers.
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.add(task)

    def removebuildable(self, task):
        self.buildable.remove(task)

    def describe_task(self, taskid):
        """Return a human-readable "ID <tid> [pri N]" string for logging."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Log the whole priority map (debug level 3), most important first."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
225
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket the tids by weight, preserving encounter order within
        # each bucket.
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            buckets.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # Flatten in ascending weight order, then flip the whole list so
        # the heaviest tasks come first.
        self.prio_map = [tid for w in sorted(buckets) for tid in buckets[w]]
        self.prio_map.reverse()
252
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            # index walks all_tasks in lockstep with new_tasks.
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            # Stable-partition prio_map: pull every tid with this taskname
            # forward to task_index, preserving relative recipe order.
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500349
class RunTaskEntry(object):
    """Per-task node in the runqueue dependency graph."""
    def __init__(self):
        # Forward and reverse dependency edges (sets of tids).
        self.depends, self.revdeps = set(), set()
        # Signature hash / unified hash and worker-side task handle, all
        # filled in later by the runqueue machinery.
        self.hash = self.unihash = self.task = None
        # Scheduling weight; every task starts with a weight of 1.
        self.weight = 1
358
class RunQueueData:
    """
    BitBake Run Queue implementation

    Holds the computed task graph (runtaskentries) plus the configuration
    and cache objects needed to build and schedule it.
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        # Collaborators handed in by the caller; dataCaches/taskData are
        # keyed by multiconfig name.
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Configuration read once from the datastore.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # Dummy progress reporter by default — presumably replaced by the
        # caller before prepare() runs; confirm against cooker usage.
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
379
380 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600381 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500382
383 def runq_depends_names(self, ids):
384 import re
385 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600386 for id in ids:
387 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500388 nam = re.sub("_[^,]*,", ",", nam)
389 ret.extend([nam])
390 return ret
391
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600392 def get_task_hash(self, tid):
393 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500394
Brad Bishop19323692019-04-05 15:28:33 -0400395 def get_task_unihash(self, tid):
396 return self.runtaskentries[tid].unihash
397
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600398 def get_user_idstring(self, tid, task_name_suffix = ""):
399 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500400
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500401 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500402 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
403 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600404 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500405 return "%s:%s" % (pn, taskname)
406
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing up to 10 distinct loops.
        """
        from copy import deepcopy

        # Loops found so far (each a reordered chain of tids).
        valid_chains = []
        # tid -> accumulated reverse-dependency closure, used to prune rescans.
        explored_deps = {}
        msgs = []

        # Raised internally to abandon the search once 10 loops are reported.
        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies; prev_chain is the
            # path from the starting task down to tid.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    # Never visited: must be scanned.
                    scan = True
                elif revdep in explored_deps[revdep]:
                    # Known to sit on a loop through itself: rescan.
                    scan = True
                else:
                    # Rescan only if it can reach something on our path.
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
499
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints: tids with no reverse dependencies, used as the seeds of a
        reverse breadth-first propagation. Returns the tid -> weight dict
        (and also stores each weight on its RunTaskEntry).
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        # Every task starts with weight 1 and a count of unprocessed
        # reverse dependencies.
        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Endpoints get a boosted starting weight of 10.
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Propagate weights from the endpoints down through depends; a task
        # becomes a new frontier point once all its revdeps were processed.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            # A task never reached by the propagation is part of (or behind)
            # a dependency cycle.
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
559
560 def prepare(self):
561 """
562 Turn a set of taskData into a RunQueue and compute data needed
563 to optimise the execution order.
564 """
565
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600566 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500567 recursivetasks = {}
568 recursiveitasks = {}
569 recursivetasksselfref = set()
570
571 taskData = self.taskData
572
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600573 found = False
574 for mc in self.taskData:
575 if len(taskData[mc].taskentries) > 0:
576 found = True
577 break
578 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500579 # Nothing to do
580 return 0
581
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600582 self.init_progress_reporter.start()
583 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500584
585 # Step A - Work out a list of tasks to run
586 #
587 # Taskdata gives us a list of possible providers for every build and run
588 # target ordered by priority. It also gives information on each of those
589 # providers.
590 #
591 # To create the actual list of tasks to execute we fix the list of
592 # providers and then resolve the dependencies into task IDs. This
593 # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).
595
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 def add_build_dependencies(depids, tasknames, depends, mc):
597 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500598 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600599 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500600 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600601 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500602 if depdata is None:
603 continue
604 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600605 t = depdata + ":" + taskname
606 if t in taskData[mc].taskentries:
607 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500608
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600609 def add_runtime_dependencies(depids, tasknames, depends, mc):
610 for depname in depids:
611 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500612 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600613 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614 if depdata is None:
615 continue
616 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600617 t = depdata + ":" + taskname
618 if t in taskData[mc].taskentries:
619 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500620
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800621 def add_mc_dependencies(mc, tid):
622 mcdeps = taskData[mc].get_mcdepends()
623 for dep in mcdeps:
624 mcdependency = dep.split(':')
625 pn = mcdependency[3]
626 frommc = mcdependency[1]
627 mcdep = mcdependency[2]
628 deptask = mcdependency[4]
629 if mc == frommc:
630 fn = taskData[mcdep].build_targets[pn][0]
631 newdep = '%s:%s' % (fn,deptask)
632 taskData[mc].taskentries[tid].tdepends.append(newdep)
633
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600634 for mc in taskData:
635 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500636
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600637 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
638 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500639
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600640 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
641
642 depends = set()
643 task_deps = self.dataCaches[mc].task_deps[taskfn]
644
645 self.runtaskentries[tid] = RunTaskEntry()
646
647 if fn in taskData[mc].failed_fns:
648 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800650 # We add multiconfig dependencies before processing internal task deps (tdepends)
651 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
652 add_mc_dependencies(mc, tid)
653
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500654 # Resolve task internal dependencies
655 #
656 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600657 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800658 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
659 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500660
661 # Resolve 'deptask' dependencies
662 #
663 # e.g. do_sometask[deptask] = "do_someothertask"
664 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600665 if 'deptask' in task_deps and taskname in task_deps['deptask']:
666 tasknames = task_deps['deptask'][taskname].split()
667 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
669 # Resolve 'rdeptask' dependencies
670 #
671 # e.g. do_sometask[rdeptask] = "do_someothertask"
672 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600673 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
674 tasknames = task_deps['rdeptask'][taskname].split()
675 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500676
677 # Resolve inter-task dependencies
678 #
679 # e.g. do_sometask[depends] = "targetname:do_someothertask"
680 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600681 idepends = taskData[mc].taskentries[tid].idepends
682 for (depname, idependtask) in idepends:
683 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500684 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600685 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500686 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600687 t = depdata + ":" + idependtask
688 depends.add(t)
689 if t not in taskData[mc].taskentries:
690 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
691 irdepends = taskData[mc].taskentries[tid].irdepends
692 for (depname, idependtask) in irdepends:
693 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500694 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500695 if not taskData[mc].run_targets[depname]:
696 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600697 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600699 t = depdata + ":" + idependtask
700 depends.add(t)
701 if t not in taskData[mc].taskentries:
702 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500703
704 # Resolve recursive 'recrdeptask' dependencies (Part A)
705 #
706 # e.g. do_sometask[recrdeptask] = "do_someothertask"
707 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
708 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600709 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
710 tasknames = task_deps['recrdeptask'][taskname].split()
711 recursivetasks[tid] = tasknames
712 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
713 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
714 if taskname in tasknames:
715 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500716
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600717 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
718 recursiveitasks[tid] = []
719 for t in task_deps['recideptask'][taskname].split():
720 newdep = build_tid(mc, fn, t)
721 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500722
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600723 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400724 # Remove all self references
725 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500726
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600727 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500728
Brad Bishop316dfdd2018-06-25 12:45:53 -0400729 self.init_progress_reporter.next_stage()
730
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500731 # Resolve recursive 'recrdeptask' dependencies (Part B)
732 #
733 # e.g. do_sometask[recrdeptask] = "do_someothertask"
734 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600735 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600736
Brad Bishop316dfdd2018-06-25 12:45:53 -0400737 # Generating/interating recursive lists of dependencies is painful and potentially slow
738 # Precompute recursive task dependencies here by:
739 # a) create a temp list of reverse dependencies (revdeps)
740 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
741 # c) combine the total list of dependencies in cumulativedeps
742 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500743
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500744
Brad Bishop316dfdd2018-06-25 12:45:53 -0400745 revdeps = {}
746 deps = {}
747 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600748 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400749 deps[tid] = set(self.runtaskentries[tid].depends)
750 revdeps[tid] = set()
751 cumulativedeps[tid] = set()
752 # Generate a temp list of reverse dependencies
753 for tid in self.runtaskentries:
754 for dep in self.runtaskentries[tid].depends:
755 revdeps[dep].add(tid)
756 # Find the dependency chain endpoints
757 endpoints = set()
758 for tid in self.runtaskentries:
759 if len(deps[tid]) == 0:
760 endpoints.add(tid)
761 # Iterate the chains collating dependencies
762 while endpoints:
763 next = set()
764 for tid in endpoints:
765 for dep in revdeps[tid]:
766 cumulativedeps[dep].add(fn_from_tid(tid))
767 cumulativedeps[dep].update(cumulativedeps[tid])
768 if tid in deps[dep]:
769 deps[dep].remove(tid)
770 if len(deps[dep]) == 0:
771 next.add(dep)
772 endpoints = next
773 #for tid in deps:
774 # if len(deps[tid]) != 0:
775 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
776
777 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
778 # resolve these recursively until we aren't adding any further extra dependencies
779 extradeps = True
780 while extradeps:
781 extradeps = 0
782 for tid in recursivetasks:
783 tasknames = recursivetasks[tid]
784
785 totaldeps = set(self.runtaskentries[tid].depends)
786 if tid in recursiveitasks:
787 totaldeps.update(recursiveitasks[tid])
788 for dep in recursiveitasks[tid]:
789 if dep not in self.runtaskentries:
790 continue
791 totaldeps.update(self.runtaskentries[dep].depends)
792
793 deps = set()
794 for dep in totaldeps:
795 if dep in cumulativedeps:
796 deps.update(cumulativedeps[dep])
797
798 for t in deps:
799 for taskname in tasknames:
800 newtid = t + ":" + taskname
801 if newtid == tid:
802 continue
803 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
804 extradeps += 1
805 self.runtaskentries[tid].depends.add(newtid)
806
807 # Handle recursive tasks which depend upon other recursive tasks
808 deps = set()
809 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
810 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
811 for newtid in deps:
812 for taskname in tasknames:
813 if not newtid.endswith(":" + taskname):
814 continue
815 if newtid in self.runtaskentries:
816 extradeps += 1
817 self.runtaskentries[tid].depends.add(newtid)
818
819 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
820
821 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
822 for tid in recursivetasksselfref:
823 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600824
825 self.init_progress_reporter.next_stage()
826
827 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500828
829 # Step B - Mark all active tasks
830 #
831 # Start with the tasks we were asked to run and mark all dependencies
832 # as active too. If the task is to be 'forced', clear its stamp. Once
833 # all active tasks are marked, prune the ones we don't need.
834
835 logger.verbose("Marking Active Tasks")
836
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600837 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500838 """
839 Mark an item as active along with its depends
840 (calls itself recursively)
841 """
842
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600843 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500844 return
845
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600846 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500847
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600848 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849 for depend in depends:
850 mark_active(depend, depth+1)
851
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600852 self.target_tids = []
853 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500854
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600855 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500856 continue
857
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600858 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500859 continue
860
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500861 parents = False
862 if task.endswith('-'):
863 parents = True
864 task = task[:-1]
865
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600866 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500867 continue
868
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600869 # fn already has mc prefix
870 tid = fn + ":" + task
871 self.target_tids.append(tid)
872 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500873 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600874 tasks = []
875 for x in taskData[mc].taskentries:
876 if x.startswith(fn + ":"):
877 tasks.append(taskname_from_tid(x))
878 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500879 if close_matches:
880 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
881 else:
882 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600883 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
884
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500885 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500886 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600887 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500888 mark_active(i, 1)
889 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600890 mark_active(tid, 1)
891
892 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500893
894 # Step C - Prune all inactive tasks
895 #
896 # Once all active tasks are marked, prune the ones we don't need.
897
Brad Bishop316dfdd2018-06-25 12:45:53 -0400898 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600899 for tid in list(self.runtaskentries.keys()):
900 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400901 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600902 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600903
Brad Bishop316dfdd2018-06-25 12:45:53 -0400904 # Handle --runall
905 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500906 # re-run the mark_active and then drop unused tasks from new list
907 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400908
909 for task in self.cooker.configuration.runall:
910 runall_tids = set()
911 for tid in list(self.runtaskentries):
912 wanttid = fn_from_tid(tid) + ":do_%s" % task
913 if wanttid in delcount:
914 self.runtaskentries[wanttid] = delcount[wanttid]
915 if wanttid in self.runtaskentries:
916 runall_tids.add(wanttid)
917
918 for tid in list(runall_tids):
919 mark_active(tid,1)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500920
921 for tid in list(self.runtaskentries.keys()):
922 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400923 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500924 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500925
926 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400927 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
928
929 self.init_progress_reporter.next_stage()
930
931 # Handle runonly
932 if self.cooker.configuration.runonly:
933 # re-run the mark_active and then drop unused tasks from new list
934 runq_build = {}
935
936 for task in self.cooker.configuration.runonly:
937 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
938
939 for tid in list(runonly_tids):
940 mark_active(tid,1)
941
942 for tid in list(self.runtaskentries.keys()):
943 if tid not in runq_build:
944 delcount[tid] = self.runtaskentries[tid]
945 del self.runtaskentries[tid]
946
947 if len(self.runtaskentries) == 0:
948 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500949
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500950 #
951 # Step D - Sanity checks and computation
952 #
953
954 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600955 if len(self.runtaskentries) == 0:
956 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500957 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
958 else:
959 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
960
Brad Bishop316dfdd2018-06-25 12:45:53 -0400961 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500962
963 logger.verbose("Assign Weightings")
964
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600965 self.init_progress_reporter.next_stage()
966
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500967 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600968 for tid in self.runtaskentries:
969 for dep in self.runtaskentries[tid].depends:
970 self.runtaskentries[dep].revdeps.add(tid)
971
972 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500973
974 # Identify tasks at the end of dependency chains
975 # Error on circular dependency loops (length two)
976 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600977 for tid in self.runtaskentries:
978 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500979 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600980 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500981 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600982 if dep in self.runtaskentries[tid].depends:
983 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
984
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985
986 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
987
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600988 self.init_progress_reporter.next_stage()
989
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500990 # Calculate task weights
991 # Check of higher length circular dependencies
992 self.runq_weight = self.calculate_task_weights(endpoints)
993
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600994 self.init_progress_reporter.next_stage()
995
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500996 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600997 for mc in self.dataCaches:
998 prov_list = {}
999 seen_fn = []
1000 for tid in self.runtaskentries:
1001 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1002 if taskfn in seen_fn:
1003 continue
1004 if mc != tidmc:
1005 continue
1006 seen_fn.append(taskfn)
1007 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1008 if prov not in prov_list:
1009 prov_list[prov] = [taskfn]
1010 elif taskfn not in prov_list[prov]:
1011 prov_list[prov].append(taskfn)
1012 for prov in prov_list:
1013 if len(prov_list[prov]) < 2:
1014 continue
1015 if prov in self.multi_provider_whitelist:
1016 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001017 seen_pn = []
1018 # If two versions of the same PN are being built its fatal, we don't support it.
1019 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001020 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001021 if pn not in seen_pn:
1022 seen_pn.append(pn)
1023 else:
1024 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001025 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1026 #
1027 # Construct a list of things which uniquely depend on each provider
1028 # since this may help the user figure out which dependency is triggering this warning
1029 #
1030 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1031 deplist = {}
1032 commondeps = None
1033 for provfn in prov_list[prov]:
1034 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001035 for tid in self.runtaskentries:
1036 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001037 if fn != provfn:
1038 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001039 for dep in self.runtaskentries[tid].revdeps:
1040 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001041 if fn == provfn:
1042 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001043 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001044 if not commondeps:
1045 commondeps = set(deps)
1046 else:
1047 commondeps &= deps
1048 deplist[provfn] = deps
1049 for provfn in deplist:
1050 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1051 #
1052 # Construct a list of provides and runtime providers for each recipe
1053 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1054 #
1055 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1056 provide_results = {}
1057 rprovide_results = {}
1058 commonprovs = None
1059 commonrprovs = None
1060 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001062 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001063 for rprovide in self.dataCaches[mc].rproviders:
1064 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001065 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001066 for package in self.dataCaches[mc].packages:
1067 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001068 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001069 for package in self.dataCaches[mc].packages_dynamic:
1070 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001071 rprovides.add(package)
1072 if not commonprovs:
1073 commonprovs = set(provides)
1074 else:
1075 commonprovs &= provides
1076 provide_results[provfn] = provides
1077 if not commonrprovs:
1078 commonrprovs = set(rprovides)
1079 else:
1080 commonrprovs &= rprovides
1081 rprovide_results[provfn] = rprovides
1082 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1083 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1084 for provfn in prov_list[prov]:
1085 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1086 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1087
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001088 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001089 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001090 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001091 logger.error(msg)
1092
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001093 self.init_progress_reporter.next_stage()
1094
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001095 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001096 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001097 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001098 self.stampfnwhitelist[mc] = []
1099 for entry in self.stampwhitelist.split():
1100 if entry not in self.taskData[mc].build_targets:
1101 continue
1102 fn = self.taskData.build_targets[entry][0]
1103 self.stampfnwhitelist[mc].append(fn)
1104
1105 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001106
1107 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001108 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001110 for tid in self.runtaskentries:
1111 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001112 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001113 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001114 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001115 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001116
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001117 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001118 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1119 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001120 if fn + ":" + taskname not in taskData[mc].taskentries:
1121 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001122 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1123 if error_nostamp:
1124 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1125 else:
1126 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1127 else:
1128 logger.verbose("Invalidate task %s, %s", taskname, fn)
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001129 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001130
1131 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132
1133 # Invalidate task if force mode active
1134 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 for tid in self.target_tids:
1136 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137
1138 # Invalidate task if invalidate mode active
1139 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001140 for tid in self.target_tids:
1141 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142 for st in self.cooker.configuration.invalidate_stamp.split(','):
1143 if not st.startswith("do_"):
1144 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 invalidate_task(fn + ":" + st, True)
1146
1147 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001148
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001149 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001150 for mc in taskData:
1151 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1152 virtpnmap = {}
1153 for v in virtmap:
1154 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1155 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1156 if hasattr(bb.parse.siggen, "tasks_resolved"):
1157 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1158
1159 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001160
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001161 # Iterate over the task list and call into the siggen code
1162 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001163 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001164 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001165 for tid in todeal.copy():
1166 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1167 dealtwith.add(tid)
1168 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001169 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001170
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001171 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001172
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001173 #self.dump_data()
1174 return len(self.runtaskentries)
1175
Brad Bishop19323692019-04-05 15:28:33 -04001176 def prepare_task_hash(self, tid):
1177 procdep = []
1178 for dep in self.runtaskentries[tid].depends:
Brad Bishop08902b02019-08-20 09:16:51 -04001179 procdep.append(dep)
1180 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.dataCaches[mc_from_tid(tid)])
1181 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001182
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001183 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001184 """
1185 Dump some debug information on the internal data structures
1186 """
1187 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001188 for tid in self.runtaskentries:
1189 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1190 self.runtaskentries[tid].weight,
1191 self.runtaskentries[tid].depends,
1192 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001193
class RunQueueWorker():
    """Pair a bitbake-worker process with the pipe used to read its events."""
    def __init__(self, process, pipe):
        # process: the worker subprocess handle; pipe: its event pipe.
        self.process = process
        self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001198
1199class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001200 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001201
1202 self.cooker = cooker
1203 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001204 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001205
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001206 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1207 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001208 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001209
1210 self.state = runQueuePrepare
1211
1212 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001213 # Invoked at regular time intervals via the bitbake heartbeat event
1214 # while the build is running. We generate a unique name for the handler
1215 # here, just in case that there ever is more than one RunQueue instance,
Brad Bishop96ff1982019-08-19 13:50:42 -04001216 # start the handler when reaching runQueueSceneInit, and stop it when
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001217 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001219 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1220 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001221 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001222 self.worker = {}
1223 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001224
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001225 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001226 logger.debug(1, "Starting bitbake-worker")
1227 magic = "decafbad"
1228 if self.cooker.configuration.profile:
1229 magic = "decafbadbad"
1230 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001231 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001232 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001233 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001234 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001235 env = os.environ.copy()
1236 for key, value in (var.split('=') for var in fakerootenv):
1237 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001238 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001239 else:
1240 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1241 bb.utils.nonblockingfd(worker.stdout)
1242 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1243
1244 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001245 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1246 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1247 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1248 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001249 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001250 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1251 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1252 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1253 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1254 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001255 "buildname" : self.cfgData.getVar("BUILDNAME"),
1256 "date" : self.cfgData.getVar("DATE"),
1257 "time" : self.cfgData.getVar("TIME"),
Brad Bishop08902b02019-08-20 09:16:51 -04001258 "hashservport" : self.cooker.hashservport,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001259 }
1260
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001261 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001262 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001263 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001264 worker.stdin.flush()
1265
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001266 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001267
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001268 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001269 if not worker:
1270 return
1271 logger.debug(1, "Teardown for bitbake-worker")
1272 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001273 worker.process.stdin.write(b"<quit></quit>")
1274 worker.process.stdin.flush()
1275 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001276 except IOError:
1277 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001278 while worker.process.returncode is None:
1279 worker.pipe.read()
1280 worker.process.poll()
1281 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001282 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001283 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001284
1285 def start_worker(self):
1286 if self.worker:
1287 self.teardown_workers()
1288 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001289 for mc in self.rqdata.dataCaches:
1290 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001291
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001292 def start_fakeworker(self, rqexec, mc):
1293 if not mc in self.fakeworker:
1294 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001295
1296 def teardown_workers(self):
1297 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001298 for mc in self.worker:
1299 self._teardown_worker(self.worker[mc])
1300 self.worker = {}
1301 for mc in self.fakeworker:
1302 self._teardown_worker(self.fakeworker[mc])
1303 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001304
1305 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001306 for mc in self.worker:
1307 self.worker[mc].pipe.read()
1308 for mc in self.fakeworker:
1309 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001310
1311 def active_fds(self):
1312 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001313 for mc in self.worker:
1314 fds.append(self.worker[mc].pipe.input)
1315 for mc in self.fakeworker:
1316 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001317 return fds
1318
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001319 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001320 def get_timestamp(f):
1321 try:
1322 if not os.access(f, os.F_OK):
1323 return None
1324 return os.stat(f)[stat.ST_MTIME]
1325 except:
1326 return None
1327
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001328 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1329 if taskname is None:
1330 taskname = tn
1331
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001332 if self.stamppolicy == "perfile":
1333 fulldeptree = False
1334 else:
1335 fulldeptree = True
1336 stampwhitelist = []
1337 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001338 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001339
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001340 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001341
1342 # If the stamp is missing, it's not current
1343 if not os.access(stampfile, os.F_OK):
1344 logger.debug(2, "Stampfile %s not available", stampfile)
1345 return False
1346 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001347 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001348 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1349 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1350 return False
1351
1352 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1353 return True
1354
1355 if cache is None:
1356 cache = {}
1357
1358 iscurrent = True
1359 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001360 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001361 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001362 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1363 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1364 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001365 t2 = get_timestamp(stampfile2)
1366 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001367 if t3 and not t2:
1368 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001369 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001370 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001371 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1372 if not t2:
1373 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1374 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001375 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001376 if t1 < t2:
1377 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1378 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001379 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001380 if recurse and iscurrent:
1381 if dep in cache:
1382 iscurrent = cache[dep]
1383 if not iscurrent:
1384 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1385 else:
1386 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1387 cache[dep] = iscurrent
1388 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001389 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001390 return iscurrent
1391
Brad Bishop08902b02019-08-20 09:16:51 -04001392 def validate_hashes(self, tocheck, data, currentcount=None, siginfo=False):
Brad Bishop96ff1982019-08-19 13:50:42 -04001393 valid = set()
1394 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001395 sq_data = {}
1396 sq_data['hash'] = {}
1397 sq_data['hashfn'] = {}
1398 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001399 for tid in tocheck:
1400 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001401 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1402 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1403 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001404
Brad Bishop08902b02019-08-20 09:16:51 -04001405 valid = self.validate_hash(sq_data, data, siginfo, currentcount)
Brad Bishop96ff1982019-08-19 13:50:42 -04001406
1407 return valid
1408
Brad Bishop08902b02019-08-20 09:16:51 -04001409 def validate_hash(self, sq_data, d, siginfo, currentcount):
1410 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount}
Brad Bishop19323692019-04-05 15:28:33 -04001411
Brad Bishop08902b02019-08-20 09:16:51 -04001412 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
1413 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount)"
Brad Bishop19323692019-04-05 15:28:33 -04001414
Brad Bishop19323692019-04-05 15:28:33 -04001415 return bb.utils.better_eval(call, locs)
1416
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001417 def _execute_runqueue(self):
1418 """
1419 Run the tasks in a queue prepared by rqdata.prepare()
1420 Upon failure, optionally try to recover the build using any alternate providers
1421 (if the abort on failure configuration option isn't set)
1422 """
1423
1424 retval = True
1425
1426 if self.state is runQueuePrepare:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001427 # NOTE: if you add, remove or significantly refactor the stages of this
1428 # process then you should recalculate the weightings here. This is quite
1429 # easy to do - just change the next line temporarily to pass debug=True as
1430 # the last parameter and you'll get a printout of the weightings as well
1431 # as a map to the lines where next_stage() was called. Of course this isn't
1432 # critical, but it helps to keep the progress reporting accurate.
1433 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1434 "Initialising tasks",
1435 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001436 if self.rqdata.prepare() == 0:
1437 self.state = runQueueComplete
1438 else:
1439 self.state = runQueueSceneInit
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001440
1441 if self.state is runQueueSceneInit:
Brad Bishop96ff1982019-08-19 13:50:42 -04001442 self.rqdata.init_progress_reporter.next_stage()
1443
1444 # we are ready to run, emit dependency info to any UI or class which
1445 # needs it
1446 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1447 self.rqdata.init_progress_reporter.next_stage()
1448 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
1449
Brad Bishope2d5b612018-11-23 10:55:50 +13001450 if not self.dm_event_handler_registered:
1451 res = bb.event.register(self.dm_event_handler_name,
Brad Bishop96ff1982019-08-19 13:50:42 -04001452 lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
Brad Bishope2d5b612018-11-23 10:55:50 +13001453 ('bb.event.HeartbeatEvent',))
1454 self.dm_event_handler_registered = True
1455
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001456 dump = self.cooker.configuration.dump_signatures
1457 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001458 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001459 if 'printdiff' in dump:
1460 invalidtasks = self.print_diffscenetasks()
1461 self.dump_signatures(dump)
1462 if 'printdiff' in dump:
1463 self.write_diffscenetasks(invalidtasks)
1464 self.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001465
Brad Bishop96ff1982019-08-19 13:50:42 -04001466 if self.state is runQueueSceneInit:
1467 self.rqdata.init_progress_reporter.next_stage()
1468 self.start_worker()
1469 self.rqdata.init_progress_reporter.next_stage()
1470 self.rqexe = RunQueueExecute(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001471
Brad Bishop96ff1982019-08-19 13:50:42 -04001472 # If we don't have any setscene functions, skip execution
1473 if len(self.rqdata.runq_setscene_tids) == 0:
1474 logger.info('No setscene tasks')
1475 for tid in self.rqdata.runtaskentries:
1476 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1477 self.rqexe.setbuildable(tid)
1478 self.rqexe.tasks_notcovered.add(tid)
1479 self.rqexe.sqdone = True
1480 logger.info('Executing Tasks')
1481 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001482
1483 if self.state is runQueueRunning:
1484 retval = self.rqexe.execute()
1485
1486 if self.state is runQueueCleanUp:
1487 retval = self.rqexe.finish()
1488
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001489 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1490
1491 if build_done and self.dm_event_handler_registered:
1492 bb.event.remove(self.dm_event_handler_name, None)
1493 self.dm_event_handler_registered = False
1494
1495 if build_done and self.rqexe:
Brad Bishop08902b02019-08-20 09:16:51 -04001496 bb.parse.siggen.save_unitaskhashes()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001497 self.teardown_workers()
Brad Bishop96ff1982019-08-19 13:50:42 -04001498 if self.rqexe:
1499 if self.rqexe.stats.failed:
1500 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1501 else:
1502 # Let's avoid the word "failed" if nothing actually did
1503 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001504
1505 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001506 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001507
1508 if self.state is runQueueComplete:
1509 # All done
1510 return False
1511
1512 # Loop
1513 return retval
1514
1515 def execute_runqueue(self):
1516 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1517 try:
1518 return self._execute_runqueue()
1519 except bb.runqueue.TaskFailure:
1520 raise
1521 except SystemExit:
1522 raise
1523 except bb.BBHandledException:
1524 try:
1525 self.teardown_workers()
1526 except:
1527 pass
1528 self.state = runQueueComplete
1529 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001530 except Exception as err:
1531 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001532 try:
1533 self.teardown_workers()
1534 except:
1535 pass
1536 self.state = runQueueComplete
1537 raise
1538
1539 def finish_runqueue(self, now = False):
1540 if not self.rqexe:
1541 self.state = runQueueComplete
1542 return
1543
1544 if now:
1545 self.rqexe.finish_now()
1546 else:
1547 self.rqexe.finish()
1548
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001549 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001550 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001551 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1552 siggen = bb.parse.siggen
1553 dataCaches = self.rqdata.dataCaches
1554 siggen.dump_sigfn(fn, dataCaches, options)
1555
1556 def dump_signatures(self, options):
1557 fns = set()
1558 bb.note("Reparsing files to collect dependency data")
1559
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001560 for tid in self.rqdata.runtaskentries:
1561 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001562 fns.add(fn)
1563
1564 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1565 # We cannot use the real multiprocessing.Pool easily due to some local data
1566 # that can't be pickled. This is a cheap multi-process solution.
1567 launched = []
1568 while fns:
1569 if len(launched) < max_process:
1570 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1571 p.start()
1572 launched.append(p)
1573 for q in launched:
1574 # The finished processes are joined when calling is_alive()
1575 if not q.is_alive():
1576 launched.remove(q)
1577 for p in launched:
1578 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001579
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001580 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001581
1582 return
1583
    def print_diffscenetasks(self):
        """
        Report which tasks cannot be reused from the setscene cache and why.

        Validates the hashes of all executable tasks, walks the dependency
        graph to find the "root" invalid tasks (those with no invalid
        dependency of their own), prints that list and returns it as a set.
        """
        noexec = []
        tocheck = set()

        # Partition tasks: noexec tasks are never hash-checked.
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            tocheck.add(tid)

        valid_new = self.validate_hashes(tocheck, self.cooker.data, None, True)

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                # 'dep' counts as valid only if every reverse dependency of it
                # is itself a setscene+noexec task.
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Everything neither valid nor noexec must be rebuilt.
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk each invalid task's dependencies; a task that
        # depends (transitively) on another invalid task ends up in 'found'
        # and is therefore not a root cause.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Once proven non-root, stop walking this task's graph.
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        # The root-cause invalid tasks.
        return invalidtasks.difference(found)
1647
    def write_diffscenetasks(self, invalidtasks):
        """
        For each invalid task, find the newest previously-written siginfo
        file and print a human-readable signature diff explaining why the
        cached result could not be reused.
        """

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Compare the two siginfo files for 'key', indenting the output;
            # compare_sigfiles calls back into this for differing deps.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            # All siginfo files ever written for this recipe/task pair.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop entries for the current hash; what remains are candidates
            # from earlier runs.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                # Values are timestamps, so the last sorted key is the newest
                # (closest) previous siginfo file.
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1682
Brad Bishop96ff1982019-08-19 13:50:42 -04001683
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001684class RunQueueExecute:
1685
    def __init__(self, rq):
        """Set up execution state for the prepared RunQueue *rq*."""
        # Handles back to the owning RunQueue and its prepared data/config.
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Parallelism limit and scheduling policy from the configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene (sq_*) task state: buildable / started / currently live.
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        # Taskhash updates reported back by workers, awaiting processing.
        self.updated_taskhash_queue = []
        self.pending_migrations = set()

        # Real (non-setscene) task state.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamps of tasks currently executing (guards shared work dirs).
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []
        # Setscene tids deferred until the tid stored as the value completes.
        self.sq_deferred = {}

        self.stampcache = {}

        self.holdoff_tasks = set()
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Point each worker pipe at this executor so events flow back here.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # We can't skip specified target tasks which aren't setscene tasks
        self.cantskip = set(self.rqdata.target_tids)
        self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
        self.cantskip.intersection_update(self.rqdata.runtaskentries)

        # Select the scheduler class matching BB_SCHEDULER by name.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        #if len(self.rqdata.runq_setscene_tids) > 0:
        self.sqdata = SQData()
        build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001754
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001755 def runqueue_process_waitpid(self, task, status):
1756
1757 # self.build_stamps[pid] may not exist when use shared work directory.
1758 if task in self.build_stamps:
1759 self.build_stamps2.remove(self.build_stamps[task])
1760 del self.build_stamps[task]
1761
Brad Bishop96ff1982019-08-19 13:50:42 -04001762 if task in self.sq_live:
1763 if status != 0:
1764 self.sq_task_fail(task, status)
1765 else:
1766 self.sq_task_complete(task)
1767 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001768 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001769 if status != 0:
1770 self.task_fail(task, status)
1771 else:
1772 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001773 return True
1774
1775 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001776 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001777 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001778 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1779 self.rq.worker[mc].process.stdin.flush()
1780 except IOError:
1781 # worker must have died?
1782 pass
1783 for mc in self.rq.fakeworker:
1784 try:
1785 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1786 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001787 except IOError:
1788 # worker must have died?
1789 pass
1790
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001791 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001792 self.rq.state = runQueueFailed
1793 return
1794
1795 self.rq.state = runQueueComplete
1796 return
1797
1798 def finish(self):
1799 self.rq.state = runQueueCleanUp
1800
Brad Bishop96ff1982019-08-19 13:50:42 -04001801 active = self.stats.active + self.sq_stats.active
1802 if active > 0:
1803 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001804 self.rq.read_workers()
1805 return self.rq.active_fds()
1806
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001807 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001808 self.rq.state = runQueueFailed
1809 return True
1810
1811 self.rq.state = runQueueComplete
1812 return True
1813
Brad Bishop96ff1982019-08-19 13:50:42 -04001814 # Used by setscene only
1815 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001816 if not self.rq.depvalidate:
1817 return False
1818
Brad Bishop08902b02019-08-20 09:16:51 -04001819 # Must not edit parent data
1820 taskdeps = set(taskdeps)
1821
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001822 taskdata = {}
1823 taskdeps.add(task)
1824 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001825 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1826 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001827 taskdata[dep] = [pn, taskname, fn]
1828 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001829 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001830 valid = bb.utils.better_eval(call, locs)
1831 return valid
1832
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001833 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001834 active = self.stats.active + self.sq_stats.active
1835 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001836 return can_start
1837
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001838 def get_schedulers(self):
1839 schedulers = set(obj for obj in globals().values()
1840 if type(obj) is type and
1841 issubclass(obj, RunQueueScheduler))
1842
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001843 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001844 if user_schedulers:
1845 for sched in user_schedulers.split():
1846 if not "." in sched:
1847 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1848 continue
1849
1850 modname, name = sched.rsplit(".", 1)
1851 try:
1852 module = __import__(modname, fromlist=(name,))
1853 except ImportError as exc:
1854 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1855 raise SystemExit(1)
1856 else:
1857 schedulers.add(getattr(module, name))
1858 return schedulers
1859
1860 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001861 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001862 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001863
1864 def task_completeoutright(self, task):
1865 """
1866 Mark a task as completed
1867 Look at the reverse dependencies and mark any task with
1868 completed dependencies as buildable
1869 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001870 self.runq_complete.add(task)
1871 for revdep in self.rqdata.runtaskentries[task].revdeps:
1872 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001873 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001874 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001875 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001876 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001877 for dep in self.rqdata.runtaskentries[revdep].depends:
1878 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001879 alldeps = False
1880 break
1881 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001882 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001883 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001884
1885 def task_complete(self, task):
1886 self.stats.taskCompleted()
1887 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1888 self.task_completeoutright(task)
1889
1890 def task_fail(self, task, exitcode):
1891 """
1892 Called when a task has failed
1893 Updates the state engine with the failure
1894 """
1895 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001896 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001897 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001898 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001899 self.rq.state = runQueueCleanUp
1900
1901 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001902 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001903 self.setbuildable(task)
1904 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1905 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001906 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001907 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001908
Brad Bishop08902b02019-08-20 09:16:51 -04001909 def summarise_scenequeue_errors(self):
1910 err = False
1911 if not self.sqdone:
1912 logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
1913 completeevent = sceneQueueComplete(self.sq_stats, self.rq)
1914 bb.event.fire(completeevent, self.cfgData)
1915 if self.sq_deferred:
1916 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
1917 err = True
1918 if self.updated_taskhash_queue:
1919 logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
1920 err = True
1921 if self.holdoff_tasks:
1922 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
1923 err = True
1924
1925 for tid in self.rqdata.runq_setscene_tids:
1926 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
1927 err = True
1928 logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
1929 if tid not in self.sq_buildable:
1930 err = True
1931 logger.error("Setscene Task %s was never marked as buildable" % tid)
1932 if tid not in self.sq_running:
1933 err = True
1934 logger.error("Setscene Task %s was never marked as running" % tid)
1935
1936 for x in self.rqdata.runtaskentries:
1937 if x not in self.tasks_covered and x not in self.tasks_notcovered:
1938 logger.error("Task %s was never moved from the setscene queue" % x)
1939 err = True
1940 if x not in self.tasks_scenequeue_done:
1941 logger.error("Task %s was never processed by the setscene code" % x)
1942 err = True
1943 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
1944 logger.error("Task %s was never marked as buildable by the setscene code" % x)
1945 err = True
1946 return err
1947
1948
    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue

        Called repeatedly from the main loop.  Each call performs at most one
        scheduling action (start, skip or fail one setscene or one real task)
        and returns True while work remains, or the result of active_fds()
        while workers are busy and we are waiting on them.
        """

        # Pull in any results from the workers and apply any taskhash
        # migrations before deciding what to schedule next.
        self.rq.read_workers()
        self.process_possible_migrations()

        task = None
        if not self.sqdone and self.can_start_task():
            # Find the next setscene to run
            for nexttask in sorted(self.rqdata.runq_setscene_tids):
                # Candidate must be buildable, not already started, and no other
                # task using the same stamp file may currently be running.
                if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
                        # Everything depending on this setscene task is already
                        # covered, so the setscene itself is only needed if it
                        # was an explicit build target.
                        if nexttask not in self.rqdata.target_tids:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.sq_task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            if nexttask in self.sq_deferred:
                                del self.sq_deferred[nexttask]
                            return True
                    # If covered tasks are running, need to wait for them to complete
                    # NOTE(review): this 'continue' is the last statement of the
                    # inner loop over t, so it is a no-op — nexttask is NOT
                    # actually held back while its covered tasks run. Confirm
                    # whether the outer loop was meant to be continued here.
                    for t in self.sqdata.sq_covered_tasks[nexttask]:
                        if t in self.runq_running and t not in self.runq_complete:
                            continue
                    if nexttask in self.sq_deferred:
                        # Deferred behind another task with the same hash; only
                        # release it once that task has completed.
                        if self.sq_deferred[nexttask] not in self.runq_complete:
                            continue
                        logger.debug(1, "Task %s no longer deferred" % nexttask)
                        del self.sq_deferred[nexttask]
                        valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, None, False)
                        if not valid:
                            logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
                            self.sq_task_failoutright(nexttask)
                            return True
                        else:
                            self.sqdata.outrightfail.remove(nexttask)
                    if nexttask in self.sqdata.outrightfail:
                        # No sstate object exists for this task
                        logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
                        self.sq_task_failoutright(nexttask)
                        return True
                    if nexttask in self.sqdata.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    task = nexttask
                    break
        if task is not None:
            # We have a setscene task to execute
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.sq_task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                # Forced targets must really run, not be accelerated
                if task in self.rqdata.target_tids:
                    self.sq_task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.sq_task_skip(task)
                return True

            if self.cooker.configuration.skipsetscene:
                logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
                self.sq_task_failoutright(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.sq_build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            taskhash = self.rqdata.get_task_hash(task)
            unihash = self.rqdata.get_task_unihash(task)
            # Dispatch to the fakeroot worker if the task needs it, else the
            # normal worker for this multiconfig.
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.sq_running.add(task)
            self.sq_live.add(task)
            self.sq_stats.taskActive()
            if self.can_start_task():
                return True

        # All setscene activity has drained and nothing is pending migration:
        # summarise the scenequeue phase and move on to real tasks.
        if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
            logger.info("Setscene tasks completed")

            err = self.summarise_scenequeue_errors()
            if err:
                self.rq.state = runQueueFailed
                return True

            if self.cooker.configuration.setsceneonly:
                self.rq.state = runQueueComplete
                return True
            self.sqdone = True

            if self.stats.total == 0:
                # nothing to do
                self.rq.state = runQueueComplete
                return True

        if self.cooker.configuration.setsceneonly:
            task = None
        else:
            task = self.sched.next()
        if task is not None:
            # We have a real (non-setscene) task to execute
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            if self.rqdata.setscenewhitelist is not None:
                if self.check_setscenewhitelist(task):
                    self.task_fail(task, "setscene whitelist")
                    return True

            if task in self.tasks_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks never go to a worker; just stamp and complete
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                taskhash = self.rqdata.get_task_hash(task)
                unihash = self.rqdata.get_task_unihash(task)
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    if not mc in self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self, mc)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

                self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
                self.build_stamps2.append(self.build_stamps[task])
                self.runq_running.add(task)
                self.stats.taskActive()
                if self.can_start_task():
                    return True

        # Nothing schedulable right now; wait on the running workers.
        if self.stats.active > 0 or self.sq_stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        # No more tasks can be run. If we have deferred setscene tasks we should run them.
        if self.sq_deferred:
            tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
            logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
            self.sq_task_failoutright(tid)
            return True

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        err = self.summarise_scenequeue_errors()
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
                err = True
            elif task not in self.runq_running:
                logger.error("Task %s never ran!", task)
                err = True
            elif task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
                err = True

        if err:
            self.rq.state = runQueueFailed
        else:
            self.rq.state = runQueueComplete

        return True
2159
Andrew Geissler99467da2019-02-25 18:54:23 -06002160 def filtermcdeps(self, task, deps):
2161 ret = set()
2162 mainmc = mc_from_tid(task)
2163 for dep in deps:
2164 mc = mc_from_tid(dep)
2165 if mc != mainmc:
2166 continue
2167 ret.add(dep)
2168 return ret
2169
2170 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
2171 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002172 def build_taskdepdata(self, task):
2173 taskdepdata = {}
Brad Bishop08902b02019-08-20 09:16:51 -04002174 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002175 next.add(task)
Andrew Geissler99467da2019-02-25 18:54:23 -06002176 next = self.filtermcdeps(task, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002177 while next:
2178 additional = []
2179 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002180 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2181 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2182 deps = self.rqdata.runtaskentries[revdep].depends
2183 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002184 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002185 unihash = self.rqdata.runtaskentries[revdep].unihash
Andrew Geissler99467da2019-02-25 18:54:23 -06002186 deps = self.filtermcdeps(task, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002187 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002188 for revdep2 in deps:
2189 if revdep2 not in taskdepdata:
2190 additional.append(revdep2)
2191 next = additional
2192
2193 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2194 return taskdepdata
2195
Brad Bishop08902b02019-08-20 09:16:51 -04002196 def update_holdofftasks(self):
2197 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002198
Brad Bishop08902b02019-08-20 09:16:51 -04002199 for tid in self.rqdata.runq_setscene_tids:
2200 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2201 self.holdoff_tasks.add(tid)
2202
2203 for tid in self.holdoff_tasks.copy():
2204 for dep in self.sqdata.sq_covered_tasks[tid]:
2205 if dep not in self.runq_complete:
2206 self.holdoff_tasks.add(dep)
2207
    def process_possible_migrations(self):
        """
        Apply queued unihash updates (from hash equivalence reports) and,
        where a setscene task has become valid as a result, reset its
        scenequeue state so it can be re-run.

        Reads self.updated_taskhash_queue, rewrites task hashes/unihashes in
        dependency order, notifies the workers of the new hashes and manages
        self.pending_migrations.
        """

        changed = set()
        for tid, unihash in self.updated_taskhash_queue.copy():
            # Leave updates for currently-running tasks queued until they finish
            if tid in self.runq_running and tid not in self.runq_complete:
                continue

            self.updated_taskhash_queue.remove((tid, unihash))

            if unihash != self.rqdata.runtaskentries[tid].unihash:
                logger.info("Task %s unihash changed to %s" % (tid, unihash))
                self.rqdata.runtaskentries[tid].unihash = unihash
                bb.parse.siggen.set_unihash(tid, unihash)

                # Work out all tasks which depend on this one
                total = set()
                next = set(self.rqdata.runtaskentries[tid].revdeps)
                while next:
                    current = next.copy()
                    total = total |next
                    next = set()
                    for ntid in current:
                        next |= self.rqdata.runtaskentries[ntid].revdeps
                        next.difference_update(total)

                # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
                # NOTE(review): 'done' is never used below.
                done = set()
                # NOTE(review): the inner 'for tid in current' rebinds the
                # outer loop variable 'tid'; the outer loop body ends after
                # this block so nothing misreads it, but it is fragile.
                next = set(self.rqdata.runtaskentries[tid].revdeps)
                while next:
                    current = next.copy()
                    next = set()
                    for tid in current:
                        # Skip tasks that still have unprocessed dependents
                        # in 'total'; they will be revisited later.
                        if not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
                            continue
                        procdep = []
                        for dep in self.rqdata.runtaskentries[tid].depends:
                            procdep.append(dep)
                        orighash = self.rqdata.runtaskentries[tid].hash
                        self.rqdata.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, procdep, self.rqdata.dataCaches[mc_from_tid(tid)])
                        origuni = self.rqdata.runtaskentries[tid].unihash
                        self.rqdata.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
                        logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, self.rqdata.runtaskentries[tid].hash, origuni, self.rqdata.runtaskentries[tid].unihash))
                        next |= self.rqdata.runtaskentries[tid].revdeps
                        changed.add(tid)
                        total.remove(tid)
                        next.intersection_update(total)

        if changed:
            # Push the refreshed hash mapping to every worker process
            for mc in self.rq.worker:
                self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
            for mc in self.rq.fakeworker:
                self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")

            logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))

        # Queue migrations for changed setscene tasks which now validate and
        # are not currently running
        for tid in changed:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            valid = self.rq.validate_hashes(set([tid]), self.cooker.data, None, False)
            if not valid:
                continue
            if tid in self.runq_running:
                continue
            if tid not in self.pending_migrations:
                self.pending_migrations.add(tid)

        for tid in self.pending_migrations.copy():
            valid = True
            # Check no tasks this covers are running
            for dep in self.sqdata.sq_covered_tasks[tid]:
                if dep in self.runq_running and dep not in self.runq_complete:
                    logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
                    valid = False
                    break
            if not valid:
                continue

            self.pending_migrations.remove(tid)

            # Undo the 'processed' markers for this task and any incomplete
            # task it covers (unskippable tasks stay marked)
            if tid in self.tasks_scenequeue_done:
                self.tasks_scenequeue_done.remove(tid)
            for dep in self.sqdata.sq_covered_tasks[tid]:
                if dep not in self.runq_complete:
                    if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
                        self.tasks_scenequeue_done.remove(dep)

            # Reset run/buildable state; buildable again once its reverse
            # dependencies have all been decided (or it has none)
            if tid in self.sq_buildable:
                self.sq_buildable.remove(tid)
            if tid in self.sq_running:
                self.sq_running.remove(tid)
            if self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
                if tid not in self.sq_buildable:
                    self.sq_buildable.add(tid)
            if len(self.sqdata.sq_revdeps[tid]) == 0:
                self.sq_buildable.add(tid)

            # Clear any previous outcome recorded for this setscene task
            if tid in self.sqdata.outrightfail:
                self.sqdata.outrightfail.remove(tid)
            if tid in self.scenequeue_notcovered:
                self.scenequeue_notcovered.remove(tid)
            if tid in self.scenequeue_covered:
                self.scenequeue_covered.remove(tid)
            if tid in self.scenequeue_notneeded:
                self.scenequeue_notneeded.remove(tid)

            # Regenerate the stamp path and drop cached stamp state
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)

            if tid in self.stampcache:
                del self.stampcache[tid]

            if tid in self.build_stamps:
                del self.build_stamps[tid]

            logger.info("Setscene task %s now valid and being rerun" % tid)
            self.sqdone = False

        if changed:
            self.update_holdofftasks()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002327
    def scenequeue_updatecounters(self, task, fail=False):
        """
        Update scenequeue/runqueue state after setscene task 'task' has been
        resolved (covered, skipped or failed).

        task: the setscene tid just decided
        fail: True when the setscene task failed/was unavailable

        Makes dependent setscene tasks buildable, extends
        tasks_scenequeue_done down the dependency chain, and recomputes
        tasks_covered/tasks_notcovered and the holdoff set.
        """

        for dep in sorted(self.sqdata.sq_deps[task]):
            # A hard inter-setscene dependency cannot succeed if we failed
            if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.sq_task_failoutright(dep)
                continue
            # dep becomes buildable once all its reverse dependencies are decided
            if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
                if dep not in self.sq_buildable:
                    self.sq_buildable.add(dep)

        next = set([task])
        while next:
            new = set()
            for t in sorted(next):
                self.tasks_scenequeue_done.add(t)
                # Look down the dependency chain for non-setscene things which this task depends on
                # and mark as 'done'
                for dep in self.rqdata.runtaskentries[t].depends:
                    if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
                        continue
                    if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
                        new.add(dep)
            next = new

        # Recompute which normal tasks are not covered: failed setscene
        # results, the tasks they cover, unskippable entry points and
        # anything the caller marked as cantskip — restricted to tasks the
        # setscene code has finished processing.
        notcovered = set(self.scenequeue_notcovered)
        notcovered |= self.cantskip
        for tid in self.scenequeue_notcovered:
            notcovered |= self.sqdata.sq_covered_tasks[tid]
        notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
        notcovered.intersection_update(self.tasks_scenequeue_done)

        # Covered tasks are those under successful setscenes; 'notcovered'
        # wins when a task appears on both sides.
        covered = set(self.scenequeue_covered)
        for tid in self.scenequeue_covered:
            covered |= self.sqdata.sq_covered_tasks[tid]
        covered.difference_update(notcovered)
        covered.intersection_update(self.tasks_scenequeue_done)

        # Any decided task whose dependencies are all complete (or empty)
        # can now be built
        for tid in notcovered | covered:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.setbuildable(tid)
            elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
                self.setbuildable(tid)

        self.tasks_covered = covered
        self.tasks_notcovered = notcovered

        self.update_holdofftasks()
Brad Bishop96ff1982019-08-19 13:50:42 -04002376
2377 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002378 """
2379 Mark a task as completed
2380 Look at the reverse dependencies and mark any task with
2381 completed dependencies as buildable
2382 """
2383
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002384 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002385 self.scenequeue_covered.add(task)
2386 self.scenequeue_updatecounters(task)
2387
Brad Bishop96ff1982019-08-19 13:50:42 -04002388 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002389 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002390 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002391 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2392 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002393 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2394 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2395 self.rq.state = runQueueCleanUp
2396
Brad Bishop96ff1982019-08-19 13:50:42 -04002397 def sq_task_complete(self, task):
2398 self.sq_stats.taskCompleted()
2399 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2400 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002401
Brad Bishop96ff1982019-08-19 13:50:42 -04002402 def sq_task_fail(self, task, result):
2403 self.sq_stats.taskFailed()
2404 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002405 self.scenequeue_notcovered.add(task)
2406 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002407 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002408
Brad Bishop96ff1982019-08-19 13:50:42 -04002409 def sq_task_failoutright(self, task):
2410 self.sq_running.add(task)
2411 self.sq_buildable.add(task)
2412 self.sq_stats.taskSkipped()
2413 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002414 self.scenequeue_notcovered.add(task)
2415 self.scenequeue_updatecounters(task, True)
2416
Brad Bishop96ff1982019-08-19 13:50:42 -04002417 def sq_task_skip(self, task):
2418 self.sq_running.add(task)
2419 self.sq_buildable.add(task)
2420 self.sq_task_completeoutright(task)
2421 self.sq_stats.taskSkipped()
2422 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002423
Brad Bishop96ff1982019-08-19 13:50:42 -04002424 def sq_build_taskdepdata(self, task):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002425 def getsetscenedeps(tid):
2426 deps = set()
2427 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2428 realtid = tid + "_setscene"
2429 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2430 for (depname, idependtask) in idepends:
2431 if depname not in self.rqdata.taskData[mc].build_targets:
2432 continue
2433
2434 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2435 if depfn is None:
2436 continue
2437 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2438 deps.add(deptid)
2439 return deps
2440
2441 taskdepdata = {}
2442 next = getsetscenedeps(task)
2443 next.add(task)
2444 while next:
2445 additional = []
2446 for revdep in next:
2447 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2448 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2449 deps = getsetscenedeps(revdep)
2450 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2451 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002452 unihash = self.rqdata.runtaskentries[revdep].unihash
2453 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002454 for revdep2 in deps:
2455 if revdep2 not in taskdepdata:
2456 additional.append(revdep2)
2457 next = additional
2458
2459 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2460 return taskdepdata
2461
Brad Bishop96ff1982019-08-19 13:50:42 -04002462 def check_setscenewhitelist(self, tid):
2463 # Check task that is going to run against the whitelist
2464 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2465 # Ignore covered tasks
2466 if tid in self.tasks_covered:
2467 return False
2468 # Ignore stamped tasks
2469 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2470 return False
2471 # Ignore noexec tasks
2472 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2473 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2474 return False
2475
2476 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2477 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2478 if tid in self.rqdata.runq_setscene_tids:
2479 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2480 else:
2481 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
2482 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2483 return True
2484 return False
2485
class SQData(object):
    """
    Container for the scenequeue (setscene) state the execution code
    operates on; populated by build_scenequeue_data().
    """
    def __init__(self):
        # Dependencies between setscene tasks
        self.sq_deps = {}
        # Reverse dependencies between setscene tasks
        self.sq_revdeps = {}
        # Inter-setscene task dependencies injected via [depends] flags
        self.sq_harddeps = {}
        # Stamp file per setscene task, so duplicates can't run in parallel
        self.stamps = {}
        # Setscene tasks directly depended upon by the build
        self.unskippable = set()
        # Setscene tasks for which no prebuilt object was found
        self.outrightfail = set()
        # For each setscene task, the normal tasks it covers
        self.sq_covered_tasks = {}
2501 self.sq_covered_tasks = {}
2502
2503def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
2504
2505 sq_revdeps = {}
2506 sq_revdeps_squash = {}
2507 sq_collated_deps = {}
2508
2509 # We need to construct a dependency graph for the setscene functions. Intermediate
2510 # dependencies between the setscene tasks only complicate the code. This code
2511 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2512 # only containing the setscene functions.
2513
2514 rqdata.init_progress_reporter.next_stage()
2515
2516 # First process the chains up to the first setscene task.
2517 endpoints = {}
2518 for tid in rqdata.runtaskentries:
2519 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2520 sq_revdeps_squash[tid] = set()
2521 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2522 #bb.warn("Added endpoint %s" % (tid))
2523 endpoints[tid] = set()
2524
2525 rqdata.init_progress_reporter.next_stage()
2526
2527 # Secondly process the chains between setscene tasks.
2528 for tid in rqdata.runq_setscene_tids:
2529 sq_collated_deps[tid] = set()
2530 #bb.warn("Added endpoint 2 %s" % (tid))
2531 for dep in rqdata.runtaskentries[tid].depends:
2532 if tid in sq_revdeps[dep]:
2533 sq_revdeps[dep].remove(tid)
2534 if dep not in endpoints:
2535 endpoints[dep] = set()
2536 #bb.warn(" Added endpoint 3 %s" % (dep))
2537 endpoints[dep].add(tid)
2538
2539 rqdata.init_progress_reporter.next_stage()
2540
2541 def process_endpoints(endpoints):
2542 newendpoints = {}
2543 for point, task in endpoints.items():
2544 tasks = set()
2545 if task:
2546 tasks |= task
2547 if sq_revdeps_squash[point]:
2548 tasks |= sq_revdeps_squash[point]
2549 if point not in rqdata.runq_setscene_tids:
2550 for t in tasks:
2551 sq_collated_deps[t].add(point)
2552 sq_revdeps_squash[point] = set()
2553 if point in rqdata.runq_setscene_tids:
2554 sq_revdeps_squash[point] = tasks
2555 tasks = set()
2556 continue
2557 for dep in rqdata.runtaskentries[point].depends:
2558 if point in sq_revdeps[dep]:
2559 sq_revdeps[dep].remove(point)
2560 if tasks:
2561 sq_revdeps_squash[dep] |= tasks
2562 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2563 newendpoints[dep] = task
2564 if len(newendpoints) != 0:
2565 process_endpoints(newendpoints)
2566
2567 process_endpoints(endpoints)
2568
2569 rqdata.init_progress_reporter.next_stage()
2570
Brad Bishop08902b02019-08-20 09:16:51 -04002571 # Build a list of tasks which are "unskippable"
2572 # These are direct endpoints referenced by the build upto and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002573 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2574 new = True
2575 for tid in rqdata.runtaskentries:
2576 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2577 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002578 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002579 while new:
2580 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002581 orig = sqdata.unskippable.copy()
2582 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002583 if tid in rqdata.runq_setscene_tids:
2584 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002585 if len(rqdata.runtaskentries[tid].depends) == 0:
2586 # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002587 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002588 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002589 if sqdata.unskippable != orig:
2590 new = True
2591
2592 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002593
2594 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2595
2596 # Sanity check all dependencies could be changed to setscene task references
2597 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2598 if tid in rqdata.runq_setscene_tids:
2599 pass
2600 elif len(sq_revdeps_squash[tid]) != 0:
2601 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2602 else:
2603 del sq_revdeps_squash[tid]
2604 rqdata.init_progress_reporter.update(taskcounter)
2605
2606 rqdata.init_progress_reporter.next_stage()
2607
2608 # Resolve setscene inter-task dependencies
2609 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2610 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2611 for tid in rqdata.runq_setscene_tids:
2612 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2613 realtid = tid + "_setscene"
2614 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2615 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2616 for (depname, idependtask) in idepends:
2617
2618 if depname not in rqdata.taskData[mc].build_targets:
2619 continue
2620
2621 depfn = rqdata.taskData[mc].build_targets[depname][0]
2622 if depfn is None:
2623 continue
2624 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2625 if deptid not in rqdata.runtaskentries:
2626 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2627
2628 if not deptid in sqdata.sq_harddeps:
2629 sqdata.sq_harddeps[deptid] = set()
2630 sqdata.sq_harddeps[deptid].add(tid)
2631
2632 sq_revdeps_squash[tid].add(deptid)
2633 # Have to zero this to avoid circular dependencies
2634 sq_revdeps_squash[deptid] = set()
2635
2636 rqdata.init_progress_reporter.next_stage()
2637
2638 for task in sqdata.sq_harddeps:
2639 for dep in sqdata.sq_harddeps[task]:
2640 sq_revdeps_squash[dep].add(task)
2641
2642 rqdata.init_progress_reporter.next_stage()
2643
2644 #for tid in sq_revdeps_squash:
2645 # data = ""
2646 # for dep in sq_revdeps_squash[tid]:
2647 # data = data + "\n %s" % dep
2648 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2649
2650 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002651 sqdata.sq_covered_tasks = sq_collated_deps
2652
2653 # Build reverse version of revdeps to populate deps structure
2654 for tid in sqdata.sq_revdeps:
2655 sqdata.sq_deps[tid] = set()
2656 for tid in sqdata.sq_revdeps:
2657 for dep in sqdata.sq_revdeps[tid]:
2658 sqdata.sq_deps[dep].add(tid)
2659
2660 rqdata.init_progress_reporter.next_stage()
2661
2662 multiconfigs = set()
2663 for tid in sqdata.sq_revdeps:
2664 multiconfigs.add(mc_from_tid(tid))
2665 if len(sqdata.sq_revdeps[tid]) == 0:
2666 sqrq.sq_buildable.add(tid)
2667
2668 rqdata.init_progress_reporter.finish()
2669
2670 if rq.hashvalidate:
2671 noexec = []
2672 stamppresent = []
2673 tocheck = set()
2674
Brad Bishop08902b02019-08-20 09:16:51 -04002675 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002676 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2677
2678 taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
2679
2680 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2681 noexec.append(tid)
2682 sqrq.sq_task_skip(tid)
2683 bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
2684 continue
2685
2686 if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
2687 logger.debug(2, 'Setscene stamp current for task %s', tid)
2688 stamppresent.append(tid)
2689 sqrq.sq_task_skip(tid)
2690 continue
2691
2692 if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
2693 logger.debug(2, 'Normal stamp current for task %s', tid)
2694 stamppresent.append(tid)
2695 sqrq.sq_task_skip(tid)
2696 continue
2697
2698 tocheck.add(tid)
2699
2700 valid = rq.validate_hashes(tocheck, cooker.data, len(stamppresent), False)
2701
2702 valid_new = stamppresent
2703 for v in valid:
2704 valid_new.append(v)
2705
2706 hashes = {}
2707 for mc in sorted(multiconfigs):
Brad Bishop08902b02019-08-20 09:16:51 -04002708 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002709 if mc_from_tid(tid) != mc:
2710 continue
2711 if tid not in valid_new and tid not in noexec and tid not in sqrq.scenequeue_notcovered:
2712 sqdata.outrightfail.add(tid)
2713
2714 h = pending_hash_index(tid, rqdata)
2715 if h not in hashes:
2716 hashes[h] = tid
2717 else:
2718 sqrq.sq_deferred[tid] = hashes[h]
2719 bb.warn("Deferring %s after %s" % (tid, hashes[h]))
2720
2721
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails to execute."""

    def __init__(self, x):
        # Expose the failure details through the standard Exception args
        # attribute (x is expected to be a sequence, typically a tuple).
        self.args = x
2728
2729
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        # Number of active task processes still running
        self.remain = remain
        # Pre-formatted message for UIs to display while waiting
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)
2738 bb.event.Event.__init__(self)
2739
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class

    Captures the identity of a task (id, name, file, hash) together with a
    snapshot of the queue statistics at the time the event was fired.
    """
    def __init__(self, task, stats, rq):
        # Full task identifier (includes filename and task name)
        self.taskid = task
        # Human-readable identifier; subclasses may override with a suffix
        # (e.g. "_setscene" in sceneQueueEvent)
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Copy so later stats updates don't mutate this event's snapshot
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2752
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class

    Same as runQueueEvent but reports the "_setscene" variant of the task.
    """
    def __init__(self, task, stats, rq, noexec=False):
        # noexec is accepted for signature compatibility with subclasses
        # but is not stored here (subclasses that need it store it themselves)
        runQueueEvent.__init__(self, task, stats, rq)
        # Override the base attributes with the setscene-suffixed forms
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002763
class runQueueTaskStarted(runQueueEvent):
    """Event notifying that a task was started."""

    def __init__(self, task, stats, rq, noexec=False):
        # Whether the task is flagged noexec (no real work will be executed)
        self.noexec = noexec
        runQueueEvent.__init__(self, task, stats, rq)
2771
class sceneQueueTaskStarted(sceneQueueEvent):
    """Event notifying that a setscene task was started."""

    def __init__(self, task, stats, rq, noexec=False):
        # Whether the task is flagged noexec (no real work will be executed)
        self.noexec = noexec
        sceneQueueEvent.__init__(self, task, stats, rq)
2779
class runQueueTaskFailed(runQueueEvent):
    """Event notifying that a task failed, carrying its exit code."""

    def __init__(self, task, stats, exitcode, rq):
        # Record the process exit code before the base event snapshot
        self.exitcode = exitcode
        runQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2790
class sceneQueueTaskFailed(sceneQueueEvent):
    """Event notifying that a setscene task failed, carrying its exit code."""

    def __init__(self, task, stats, exitcode, rq):
        # Record the process exit code before the base event snapshot
        self.exitcode = exitcode
        sceneQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2801
class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        # Deliberately skips sceneQueueEvent/runQueueEvent.__init__: there is
        # no single task associated with whole-queue completion, so only the
        # stats snapshot is recorded.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2809
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed

    Carries no extra state beyond the runQueueEvent base attributes.
    """
2814
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed

    Carries no extra state beyond the sceneQueueEvent base attributes.
    """
2819
class runQueueTaskSkipped(runQueueEvent):
    """Event notifying that a task was skipped rather than executed."""

    def __init__(self, task, stats, rq, reason):
        # Record why the task was skipped before the base event snapshot
        self.reason = reason
        runQueueEvent.__init__(self, task, stats, rq)
2827
class taskUniHashUpdate(bb.event.Event):
    """
    Event notifying that a task's unihash has changed; runQueuePipe.read()
    watches for these and queues (taskid, unihash) pairs on the executor's
    updated_taskhash_queue.
    """
    def __init__(self, task, unihash):
        self.taskid = task
        self.unihash = unihash
        bb.event.Event.__init__(self)
2836
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    Accumulates bytes from the worker into an internal buffer and parses
    framed messages of the form <event>pickle</event> and
    <exitcode>pickle</exitcode>, dispatching each as it completes.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # Read end of the worker's pipe; the write end belongs to the worker
        # process so it is closed on our side
        self.input = pipein
        if pipeout:
            pipeout.close()
        # Non-blocking so read() can poll without stalling the server
        bb.utils.nonblockingfd(self.input)
        # Bytes received so far but not yet parsed into complete messages
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Replace the executor that parsed messages are reported to
        self.rqexec = rqexec

    def read(self):
        """
        Drain available data from the pipe and dispatch any complete
        messages. Returns True if new data arrived on this call.
        """
        # First verify the worker processes are still alive; a dead worker
        # (outside teardown) forces the runqueue to shut down
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN simply means no data is available on the non-blocking fd
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep parsing until no complete framed message remains in the buffer
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # NOTE: pickled payloads come from our own worker
                    # processes over this private pipe, so they are trusted
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                if isinstance(event, taskUniHashUpdate):
                    # Forward unihash changes to the executor for processing
                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
                found = True
                # Strip the processed message; 8 == len(b"</event>")
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # Strip the processed message; 11 == len(b"</exitcode>")
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain any messages still pending before closing the descriptor
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002902
def get_setscene_enforce_whitelist(d):
    """
    Return the list of "pn:taskname" patterns allowed to run as real tasks
    when setscene enforcement is active, or None when enforcement is
    disabled (BB_SETSCENE_ENFORCE is not '1').

    Entries beginning with '%:' are expanded once per non-option command
    line argument, substituting that target's recipe part for the '%'.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    # Iterate the list directly; it is never modified, so the defensive
    # whitelist[:] copy the original made was unnecessary.
    for item in whitelist:
        if item.startswith('%:'):
            # '%' is a placeholder for each target named on the command line
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist
2916
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if "pn:taskname" matches any fnmatch pattern in whitelist.
    A whitelist of None means enforcement is off, so everything is allowed.
    """
    import fnmatch
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)