"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#

import copy
import os
import sys
import stat
import errno
import logging
import re
import bb
from bb import msg, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process
import shlex
import pprint

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
hashequiv_logger = logging.getLogger("BitBake.RunQueue.HashEquiv")

__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )

def fn_from_tid(tid):
    return tid.rsplit(":", 1)[0]

def taskname_from_tid(tid):
    return tid.rsplit(":", 1)[1]

def mc_from_tid(tid):
    if tid.startswith('mc:'):
        return tid.split(':')[1]
    return ""

def split_tid(tid):
    (mc, fn, taskname, _) = split_tid_mcfn(tid)
    return (mc, fn, taskname)

def split_mc(n):
    if n.startswith("mc:"):
        _, mc, n = n.split(":", 2)
        return (mc, n)
    return ('', n)

def split_tid_mcfn(tid):
    if tid.startswith('mc:'):
        elems = tid.split(':')
        mc = elems[1]
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        mcfn = "mc:" + mc + ":" + fn
    else:
        tid = tid.rsplit(":", 1)
        mc = ""
        fn = tid[0]
        taskname = tid[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)

def build_tid(mc, fn, taskname):
    if mc:
        return "mc:" + mc + ":" + fn + ":" + taskname
    return fn + ":" + taskname

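# Illustrative note (added commentary, not part of the original source): task
# ids ("tids") are plain strings. For a made-up multiconfig task such as
#     "mc:target1:/path/to/recipe.bb:do_compile"
# split_tid_mcfn() returns
#     ("target1", "/path/to/recipe.bb", "do_compile", "mc:target1:/path/to/recipe.bb")
# while for a non-multiconfig "/path/to/recipe.bb:do_compile" it returns
#     ("", "/path/to/recipe.bb", "do_compile", "/path/to/recipe.bb").
# The recipe path and task name above are hypothetical examples.
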
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].unihash
    return pn + ":" + taskname + ":" + h

Patrick Williamsc124f4f2015-09-15 14:41:29 -050084class RunQueueStats:
85 """
86 Holds statistics on the tasks handled by the associated runQueue
87 """
88 def __init__(self, total):
89 self.completed = 0
90 self.skipped = 0
91 self.failed = 0
92 self.active = 0
93 self.total = total
94
95 def copy(self):
96 obj = self.__class__(self.total)
97 obj.__dict__.update(self.__dict__)
98 return obj
99
100 def taskFailed(self):
101 self.active = self.active - 1
102 self.failed = self.failed + 1
103
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800104 def taskCompleted(self):
105 self.active = self.active - 1
106 self.completed = self.completed + 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500107
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800108 def taskSkipped(self):
109 self.active = self.active + 1
110 self.skipped = self.skipped + 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500111
112 def taskActive(self):
113 self.active = self.active + 1
114
115# These values indicate the next step due to be run in the
116# runQueue state machine
117runQueuePrepare = 2
118runQueueSceneInit = 3
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500119runQueueRunning = 6
120runQueueFailed = 7
121runQueueCleanUp = 8
122runQueueComplete = 9
123
124class RunQueueScheduler(object):
125 """
126 Control the order tasks are scheduled in.
127 """
128 name = "basic"
129
130 def __init__(self, runqueue, rqdata):
131 """
132 The default scheduler just returns the first buildable task (the
133 priority map is sorted by task number)
134 """
135 self.rq = runqueue
136 self.rqdata = rqdata
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600137 self.numTasks = len(self.rqdata.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500138
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600139 self.prio_map = [self.rqdata.runtaskentries.keys()]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500140
Brad Bishop08902b02019-08-20 09:16:51 -0400141 self.buildable = set()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800142 self.skip_maxthread = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500143 self.stamps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600144 for tid in self.rqdata.runtaskentries:
145 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
146 self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
147 if tid in self.rq.runq_buildable:
                self.buildable.add(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Once tasks are running we don't need to worry about them again
        self.buildable.difference_update(self.rq.runq_running)
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks whose maximum thread count has been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.add(task)

    def removebuildable(self, task):
        self.buildable.remove(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        weights = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            if not weight in weights:
                weights[weight] = []
            weights[weight].append(tid)

        self.prio_map = []
        for weight in sorted(weights):
            for w in weights[weight]:
                self.prio_map.append(w)

        self.prio_map.reverse()
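
        # Note (added commentary, not in the original source): tasks are
        # appended in ascending weight order and the list is then reversed,
        # so the heaviest tasks sit at the front of prio_map. Because
        # next_buildable_task() in the base class prefers the lowest
        # rev_prio_map index, those heavy tasks are scheduled first.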

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task orderings. A worked example follows below.
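        # Worked example (illustrative, using made-up recipes A and B): if
        # A's tasks are [do_fetch, do_unpack, do_compile, do_build] and B's
        # are [do_fetch, do_compile, do_build], merging keeps the common
        # tasks in order and inserts do_unpack after the preceding common
        # task, giving [do_fetch, do_unpack, do_compile, do_build].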
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronize by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now such that do_build is the most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example of why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's tasks that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')

class RunTaskEntry(object):
    def __init__(self):
        self.depends = set()
        self.revdeps = set()
        self.hash = None
        self.unihash = None
        self.task = None
        self.weight = 1

class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()

    def reset(self):
        self.runtaskentries = {}

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in ids:
            nam = os.path.basename(id)
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_hash(self, tid):
        return self.runtaskentries[tid].hash

    def get_task_unihash(self, tid):
        return self.runtaskentries[tid].unihash

    def get_user_idstring(self, tid, task_name_suffix = ""):
        return tid + task_name_suffix

    def get_short_user_idstring(self, task, task_name_suffix = ""):
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that are not
        possible to execute due to circular dependencies.
513 """
514
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600515 numTasks = len(self.runtaskentries)
516 weight = {}
517 deps_left = {}
518 task_done = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500519
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600520 for tid in self.runtaskentries:
521 task_done[tid] = False
522 weight[tid] = 1
523 deps_left[tid] = len(self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500524
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600525 for tid in endpoints:
526 weight[tid] = 10
527 task_done[tid] = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500528
529 while True:
530 next_points = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600531 for tid in endpoints:
532 for revdep in self.runtaskentries[tid].depends:
533 weight[revdep] = weight[revdep] + weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500534 deps_left[revdep] = deps_left[revdep] - 1
535 if deps_left[revdep] == 0:
536 next_points.append(revdep)
537 task_done[revdep] = True
538 endpoints = next_points
539 if len(next_points) == 0:
540 break
541
542 # Circular dependency sanity check
543 problem_tasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600544 for tid in self.runtaskentries:
545 if task_done[tid] is False or deps_left[tid] != 0:
546 problem_tasks.append(tid)
547 logger.debug(2, "Task %s is not buildable", tid)
548 logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
549 self.runtaskentries[tid].weight = weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500550
551 if problem_tasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600552 message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500553 message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
554 message = message + "Identifying dependency loops (this may take a short while)...\n"
555 logger.error(message)
556
557 msgs = self.circular_depchains_handler(problem_tasks)
558
559 message = "\n"
560 for msg in msgs:
561 message = message + msg
562 bb.msg.fatal("RunQueue", message)
563
564 return weight
565
566 def prepare(self):
567 """
568 Turn a set of taskData into a RunQueue and compute data needed
569 to optimise the execution order.
570 """
571
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600572 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500573 recursivetasks = {}
574 recursiveitasks = {}
575 recursivetasksselfref = set()
576
577 taskData = self.taskData
578
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600579 found = False
580 for mc in self.taskData:
581 if len(taskData[mc].taskentries) > 0:
582 found = True
583 break
584 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500585 # Nothing to do
586 return 0
587
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600588 self.init_progress_reporter.start()
589 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500590
591 # Step A - Work out a list of tasks to run
592 #
593 # Taskdata gives us a list of possible providers for every build and run
594 # target ordered by priority. It also gives information on each of those
595 # providers.
596 #
597 # To create the actual list of tasks to execute we fix the list of
598 # providers and then resolve the dependencies into task IDs. This
599 # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).
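        #
        # Illustrative note (added, not in the original source): a resolved
        # provider is a recipe filename, and each relevant taskname is
        # appended to it to form a tid; e.g. a hypothetical "/path/to/foo.bb"
        # combined with "do_compile" becomes "/path/to/foo.bb:do_compile".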

        def add_build_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
                    continue
                depdata = taskData[mc].build_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_runtime_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
                    continue
                depdata = taskData[mc].run_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_mc_dependencies(mc, tid):
            mcdeps = taskData[mc].get_mcdepends()
            for dep in mcdeps:
                mcdependency = dep.split(':')
                pn = mcdependency[3]
                frommc = mcdependency[1]
                mcdep = mcdependency[2]
                deptask = mcdependency[4]
                if mc == frommc:
                    fn = taskData[mcdep].build_targets[pn][0]
                    newdep = '%s:%s' % (fn,deptask)
                    taskData[mc].taskentries[tid].tdepends.append(newdep)

        for mc in taskData:
            for tid in taskData[mc].taskentries:

                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                #runtid = build_tid(mc, fn, taskname)

                #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)

                depends = set()
                task_deps = self.dataCaches[mc].task_deps[taskfn]

                self.runtaskentries[tid] = RunTaskEntry()

                if fn in taskData[mc].failed_fns:
                    continue

                # We add multiconfig dependencies before processing internal task deps (tdepends)
                if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
                    add_mc_dependencies(mc, tid)

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                for t in taskData[mc].taskentries[tid].tdepends:
                    (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
                    depends.add(build_tid(depmc, depfn, deptaskname))

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskname in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskname].split()
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskname].split()
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData[mc].taskentries[tid].idepends
                for (depname, idependtask) in idepends:
                    if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData[mc].build_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
                irdepends = taskData[mc].taskentries[tid].irdepends
                for (depname, idependtask) in irdepends:
                    if depname in taskData[mc].run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        if not taskData[mc].run_targets[depname]:
                            continue
                        depdata = taskData[mc].run_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskname].split()
                    recursivetasks[tid] = tasknames
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
                    if taskname in tasknames:
                        recursivetasksselfref.add(tid)

                if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
                    recursiveitasks[tid] = []
                    for t in task_deps['recideptask'][taskname].split():
                        newdep = build_tid(mc, fn, t)
                        recursiveitasks[tid].append(newdep)

                self.runtaskentries[tid].depends = depends
                # Remove all self references
                self.runtaskentries[tid].depends.discard(tid)

        #self.dump_data()

        self.init_progress_reporter.next_stage()

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed

        # Generating/iterating recursive lists of dependencies is painful and potentially slow
        # Precompute recursive task dependencies here by:
        # a) create a temp list of reverse dependencies (revdeps)
        # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
        # c) combine the total list of dependencies in cumulativedeps
        # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)


        revdeps = {}
        deps = {}
        cumulativedeps = {}
        for tid in self.runtaskentries:
            deps[tid] = set(self.runtaskentries[tid].depends)
            revdeps[tid] = set()
            cumulativedeps[tid] = set()
        # Generate a temp list of reverse dependencies
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                revdeps[dep].add(tid)
        # Find the dependency chain endpoints
        endpoints = set()
        for tid in self.runtaskentries:
            if len(deps[tid]) == 0:
                endpoints.add(tid)
        # Iterate the chains collating dependencies
        while endpoints:
            next = set()
            for tid in endpoints:
                for dep in revdeps[tid]:
                    cumulativedeps[dep].add(fn_from_tid(tid))
                    cumulativedeps[dep].update(cumulativedeps[tid])
                    if tid in deps[dep]:
                        deps[dep].remove(tid)
                    if len(deps[dep]) == 0:
                        next.add(dep)
            endpoints = next
        #for tid in deps:
        #    if len(deps[tid]) != 0:
        #        bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))

        # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
        # resolve these recursively until we aren't adding any further extra dependencies
        extradeps = True
        while extradeps:
            extradeps = 0
            for tid in recursivetasks:
                tasknames = recursivetasks[tid]

                totaldeps = set(self.runtaskentries[tid].depends)
                if tid in recursiveitasks:
                    totaldeps.update(recursiveitasks[tid])
                    for dep in recursiveitasks[tid]:
                        if dep not in self.runtaskentries:
                            continue
                        totaldeps.update(self.runtaskentries[dep].depends)

                deps = set()
                for dep in totaldeps:
                    if dep in cumulativedeps:
                        deps.update(cumulativedeps[dep])

                for t in deps:
                    for taskname in tasknames:
                        newtid = t + ":" + taskname
                        if newtid == tid:
                            continue
                        if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

                # Handle recursive tasks which depend upon other recursive tasks
                deps = set()
                for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
                    deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
                for newtid in deps:
                    for taskname in tasknames:
                        if not newtid.endswith(":" + taskname):
                            continue
                        if newtid in self.runtaskentries:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

            bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)

        # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for tid in recursivetasksselfref:
            self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)

        self.init_progress_reporter.next_stage()

        #self.dump_data()

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)

        def invalidate_task(tid, error_nostamp):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)

        self.target_tids = []
        for (mc, target, task, fn) in self.targets:

            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
                continue

            if target in taskData[mc].failed_deps:
                continue

            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            if fn in taskData[mc].failed_fns:
                continue

            # fn already has mc prefix
            tid = fn + ":" + task
            self.target_tids.append(tid)
            if tid not in taskData[mc].taskentries:
                import difflib
                tasks = []
                for x in taskData[mc].taskentries:
                    if x.startswith(fn + ":"):
                        tasks.append(taskname_from_tid(x))
                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n %s" % "\n ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))

            # For tasks called "XXXX-", only run their dependencies
            if parents:
                for i in self.runtaskentries[tid].depends:
                    mark_active(i, 1)
            else:
                mark_active(tid, 1)

        self.init_progress_reporter.next_stage()

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        delcount = {}
        for tid in list(self.runtaskentries.keys()):
            if tid not in runq_build:
                delcount[tid] = self.runtaskentries[tid]
                del self.runtaskentries[tid]

        # Handle --runall
        if self.cooker.configuration.runall:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runall:
                if not task.startswith("do_"):
                    task = "do_{0}".format(task)
                runall_tids = set()
                for tid in list(self.runtaskentries):
                    wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
                    if wanttid in delcount:
                        self.runtaskentries[wanttid] = delcount[wanttid]
                    if wanttid in self.runtaskentries:
                        runall_tids.add(wanttid)

                for tid in list(runall_tids):
                    mark_active(tid,1)
                    if self.cooker.configuration.force:
                        invalidate_task(tid, False)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))

        self.init_progress_reporter.next_stage()

        # Handle runonly
        if self.cooker.configuration.runonly:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runonly:
                if not task.startswith("do_"):
                    task = "do_{0}".format(task)
                runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task }

                for tid in list(runonly_tids):
                    mark_active(tid,1)
                    if self.cooker.configuration.force:
                        invalidate_task(tid, False)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runtaskentries) == 0:
            if not taskData[''].abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))

        logger.verbose("Assign Weightings")

        self.init_progress_reporter.next_stage()

        # Generate a list of reverse dependencies to ease future calculations
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                self.runtaskentries[dep].revdeps.add(tid)

        self.init_progress_reporter.next_stage()

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for tid in self.runtaskentries:
            revdeps = self.runtaskentries[tid].revdeps
            if len(revdeps) == 0:
                endpoints.append(tid)
            for dep in revdeps:
                if dep in self.runtaskentries[tid].depends:
                    bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))


        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        self.init_progress_reporter.next_stage()

        # Calculate task weights
        # Check for circular dependencies longer than length two
        self.runq_weight = self.calculate_task_weights(endpoints)

        self.init_progress_reporter.next_stage()

        # Sanity Check - Check for multiple tasks building the same provider
        for mc in self.dataCaches:
            prov_list = {}
            seen_fn = []
            for tid in self.runtaskentries:
                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                if taskfn in seen_fn:
                    continue
                if mc != tidmc:
                    continue
                seen_fn.append(taskfn)
                for prov in self.dataCaches[mc].fn_provides[taskfn]:
                    if prov not in prov_list:
                        prov_list[prov] = [taskfn]
                    elif taskfn not in prov_list[prov]:
                        prov_list[prov].append(taskfn)
            for prov in prov_list:
                if len(prov_list[prov]) < 2:
                    continue
                if prov in self.multi_provider_whitelist:
                    continue
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCaches[mc].pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for tid in self.runtaskentries:
                        fn = fn_from_tid(tid)
                        if fn != provfn:
                            continue
                        for dep in self.runtaskentries[tid].revdeps:
                            fn = fn_from_tid(dep)
                            if fn == provfn:
                                continue
                            deps.add(dep)
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCaches[mc].fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCaches[mc].rproviders:
                        if provfn in self.dataCaches[mc].rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCaches[mc].packages:
                        if provfn in self.dataCaches[mc].packages[package]:
                            rprovides.add(package)
                    for package in self.dataCaches[mc].packages_dynamic:
                        if provfn in self.dataCaches[mc].packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
                #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.verbnote(msg)
                else:
                    logger.error(msg)

        self.init_progress_reporter.next_stage()

        # Create a whitelist usable by the stamp checks
        self.stampfnwhitelist = {}
        for mc in self.taskData:
            self.stampfnwhitelist[mc] = []
            for entry in self.stampwhitelist.split():
                if entry not in self.taskData[mc].build_targets:
                    continue
                fn = self.taskData[mc].build_targets[entry][0]
                self.stampfnwhitelist[mc].append(fn)

        self.init_progress_reporter.next_stage()

        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene_tids = set()
        if not self.cooker.configuration.nosetscene:
            for tid in self.runtaskentries:
                (mc, fn, taskname, _) = split_tid_mcfn(tid)
                setscenetid = tid + "_setscene"
                if setscenetid not in taskData[mc].taskentries:
                    continue
                self.runq_setscene_tids.add(tid)

        self.init_progress_reporter.next_stage()

        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for tid in self.target_tids:
                invalidate_task(tid, False)

        # Invalidate task if invalidate mode active
        if self.cooker.configuration.invalidate_stamp:
            for tid in self.target_tids:
                fn = fn_from_tid(tid)
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    if not st.startswith("do_"):
                        st = "do_%s" % st
                    invalidate_task(fn + ":" + st, True)

        self.init_progress_reporter.next_stage()

        # Create and print to the logs a virtual/xxxx -> PN (fn) table
        for mc in taskData:
            virtmap = taskData[mc].get_providermap(prefix="virtual/")
            virtpnmap = {}
            for v in virtmap:
                virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
                bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
            if hasattr(bb.parse.siggen, "tasks_resolved"):
                bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])

        self.init_progress_reporter.next_stage()

        bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(self.runtaskentries)
        while len(todeal) > 0:
            for tid in todeal.copy():
                if len(self.runtaskentries[tid].depends - dealtwith) == 0:
                    dealtwith.add(tid)
                    todeal.remove(tid)
                    self.prepare_task_hash(tid)

        bb.parse.siggen.writeout_file_checksum_cache()

        #self.dump_data()
        return len(self.runtaskentries)

    def prepare_task_hash(self, tid):
        dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid))
        bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc)
        self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc)
        self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)

    def dump_data(self):
        """
        Dump some debug information on the internal data structures
        """
        logger.debug(3, "run_tasks:")
        for tid in self.runtaskentries:
            logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
                         self.runtaskentries[tid].weight,
                         self.runtaskentries[tid].depends,
                         self.runtaskentries[tid].revdeps)

class RunQueueWorker():
    def __init__(self, process, pipe):
        self.process = process
        self.pipe = pipe

class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):

        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
1230 # here, just in case there is ever more than one RunQueue instance,
Brad Bishop96ff1982019-08-19 13:50:42 -04001231 # start the handler when reaching runQueueSceneInit, and stop it when
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001232 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001233 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001234 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1235 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001236 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001237 self.worker = {}
1238 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001239
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001240 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001241 logger.debug(1, "Starting bitbake-worker")
1242 magic = "decafbad"
1243 if self.cooker.configuration.profile:
1244 magic = "decafbadbad"
1245 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001246 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001247 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001248 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001249 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001250 env = os.environ.copy()
1251 for key, value in (var.split('=', 1) for var in fakerootenv):
1252 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001253 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001254 else:
1255 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1256 bb.utils.nonblockingfd(worker.stdout)
1257 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1258
1259 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001260 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1261 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1262 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1263 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001264 "sigdata" : bb.parse.siggen.get_taskdata(),
Andrew Geissler82c905d2020-04-13 13:39:40 -05001265 "logdefaultlevel" : bb.msg.loggerDefaultLogLevel,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001266 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1267 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1268 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1269 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001270 "buildname" : self.cfgData.getVar("BUILDNAME"),
1271 "date" : self.cfgData.getVar("DATE"),
1272 "time" : self.cfgData.getVar("TIME"),
Brad Bishopa34c0302019-09-23 22:34:48 -04001273 "hashservaddr" : self.cooker.hashservaddr,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001274 }
1275
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001276 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001277 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001278 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001279 worker.stdin.flush()
1280
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001281 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001282
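# The exchange above (and the shutdown in _teardown_worker below) uses a simple
# framing over the worker's stdin: a pickled payload wrapped in a tag pair such
# as <cookerconfig>, <workerdata> or <runtask> (control messages like <quit>
# carry an empty body). A minimal sketch of the framing only, with a hypothetical
# send_message() helper; the actual tag set and payload contents are defined by
# bitbake-worker:
#
#   import pickle
#
#   def send_message(stream, tag, payload):
#       stream.write(b"<" + tag + b">" + pickle.dumps(payload) + b"</" + tag + b">")
#       stream.flush()
#
#   # e.g. send_message(worker.stdin, b"workerdata", workerdata)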
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001283 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001284 if not worker:
1285 return
1286 logger.debug(1, "Teardown for bitbake-worker")
1287 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001288 worker.process.stdin.write(b"<quit></quit>")
1289 worker.process.stdin.flush()
1290 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001291 except IOError:
1292 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001293 while worker.process.returncode is None:
1294 worker.pipe.read()
1295 worker.process.poll()
1296 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001297 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001298 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001299
1300 def start_worker(self):
1301 if self.worker:
1302 self.teardown_workers()
1303 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001304 for mc in self.rqdata.dataCaches:
1305 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001306
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001307 def start_fakeworker(self, rqexec, mc):
1308 if not mc in self.fakeworker:
1309 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001310
1311 def teardown_workers(self):
1312 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001313 for mc in self.worker:
1314 self._teardown_worker(self.worker[mc])
1315 self.worker = {}
1316 for mc in self.fakeworker:
1317 self._teardown_worker(self.fakeworker[mc])
1318 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001319
1320 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001321 for mc in self.worker:
1322 self.worker[mc].pipe.read()
1323 for mc in self.fakeworker:
1324 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001325
1326 def active_fds(self):
1327 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001328 for mc in self.worker:
1329 fds.append(self.worker[mc].pipe.input)
1330 for mc in self.fakeworker:
1331 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001332 return fds
1333
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001334 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001335 def get_timestamp(f):
1336 try:
1337 if not os.access(f, os.F_OK):
1338 return None
1339 return os.stat(f)[stat.ST_MTIME]
1340 except:
1341 return None
1342
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001343 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1344 if taskname is None:
1345 taskname = tn
1346
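# BB_STAMP_POLICY (read in __init__) controls how far the timestamp comparison
# below reaches: "perfile" only compares stamps belonging to the same recipe
# file, "full" compares against every dependency, and "whitelist" behaves like
# "full" except that dependencies whose file is in the stamp whitelist are exempt.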
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001347 if self.stamppolicy == "perfile":
1348 fulldeptree = False
1349 else:
1350 fulldeptree = True
1351 stampwhitelist = []
1352 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001353 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001354
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001355 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001356
1357 # If the stamp is missing, it's not current
1358 if not os.access(stampfile, os.F_OK):
1359 logger.debug(2, "Stampfile %s not available", stampfile)
1360 return False
1361 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001362 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001363 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1364 logger.debug(2, "%s.%s is nostamp", fn, taskname)
1365 return False
1366
1367 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1368 return True
1369
1370 if cache is None:
1371 cache = {}
1372
1373 iscurrent = True
1374 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001375 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001376 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001377 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1378 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1379 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001380 t2 = get_timestamp(stampfile2)
1381 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001382 if t3 and not t2:
1383 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001384 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001385 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001386 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1387 if not t2:
1388 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1389 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001390 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001391 if t1 < t2:
1392 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1393 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001394 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001395 if recurse and iscurrent:
1396 if dep in cache:
1397 iscurrent = cache[dep]
1398 if not iscurrent:
1399 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1400 else:
1401 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1402 cache[dep] = iscurrent
1403 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001404 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001405 return iscurrent
1406
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001407 def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04001408 valid = set()
1409 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001410 sq_data = {}
1411 sq_data['hash'] = {}
1412 sq_data['hashfn'] = {}
1413 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001414 for tid in tocheck:
1415 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001416 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1417 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1418 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001419
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001420 valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary)
Brad Bishop96ff1982019-08-19 13:50:42 -04001421
1422 return valid
1423
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001424 def validate_hash(self, sq_data, d, siginfo, currentcount, summary):
1425 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary}
Brad Bishop19323692019-04-05 15:28:33 -04001426
Brad Bishop08902b02019-08-20 09:16:51 -04001427 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001428 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)"
Brad Bishop19323692019-04-05 15:28:33 -04001429
Brad Bishop19323692019-04-05 15:28:33 -04001430 return bb.utils.better_eval(call, locs)
1431
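# validate_hash() defers entirely to the metadata: BB_HASHCHECK_FUNCTION names a
# python function (OpenEmbedded's sstate.bbclass provides one) which is given
# sq_data and returns the set of tids it considers available from sstate. A
# hypothetical sketch of the expected interface only, not the real implementation:
#
#   def my_hashcheck(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
#       found = set()
#       for tid in sq_data['hash']:
#           # decide here whether an object exists for sq_data['unihash'][tid] /
#           # sq_data['hashfn'][tid]; add the tid if it does
#           found.add(tid)
#       return found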
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001432 def _execute_runqueue(self):
1433 """
1434 Run the tasks in a queue prepared by rqdata.prepare()
1435 Upon failure, optionally try to recover the build using any alternate providers
1436 (if the abort on failure configuration option isn't set)
1437 """
1438
1439 retval = True
1440
1441 if self.state is runQueuePrepare:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001442 # NOTE: if you add, remove or significantly refactor the stages of this
1443 # process then you should recalculate the weightings here. This is quite
1444 # easy to do - just change the next line temporarily to pass debug=True as
1445 # the last parameter and you'll get a printout of the weightings as well
1446 # as a map to the lines where next_stage() was called. Of course this isn't
1447 # critical, but it helps to keep the progress reporting accurate.
1448 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1449 "Initialising tasks",
1450 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001451 if self.rqdata.prepare() == 0:
1452 self.state = runQueueComplete
1453 else:
1454 self.state = runQueueSceneInit
Brad Bishop00e122a2019-10-05 11:10:57 -04001455 bb.parse.siggen.save_unitaskhashes()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001456
1457 if self.state is runQueueSceneInit:
Brad Bishop96ff1982019-08-19 13:50:42 -04001458 self.rqdata.init_progress_reporter.next_stage()
1459
1460 # we are ready to run, emit dependency info to any UI or class which
1461 # needs it
1462 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1463 self.rqdata.init_progress_reporter.next_stage()
1464 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
1465
Brad Bishope2d5b612018-11-23 10:55:50 +13001466 if not self.dm_event_handler_registered:
1467 res = bb.event.register(self.dm_event_handler_name,
Brad Bishop96ff1982019-08-19 13:50:42 -04001468 lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
Brad Bishope2d5b612018-11-23 10:55:50 +13001469 ('bb.event.HeartbeatEvent',))
1470 self.dm_event_handler_registered = True
1471
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001472 dump = self.cooker.configuration.dump_signatures
1473 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001474 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001475 if 'printdiff' in dump:
1476 invalidtasks = self.print_diffscenetasks()
1477 self.dump_signatures(dump)
1478 if 'printdiff' in dump:
1479 self.write_diffscenetasks(invalidtasks)
1480 self.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001481
Brad Bishop96ff1982019-08-19 13:50:42 -04001482 if self.state is runQueueSceneInit:
1483 self.rqdata.init_progress_reporter.next_stage()
1484 self.start_worker()
1485 self.rqdata.init_progress_reporter.next_stage()
1486 self.rqexe = RunQueueExecute(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001487
Brad Bishop96ff1982019-08-19 13:50:42 -04001488 # If we don't have any setscene functions, skip execution
1489 if len(self.rqdata.runq_setscene_tids) == 0:
1490 logger.info('No setscene tasks')
1491 for tid in self.rqdata.runtaskentries:
1492 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1493 self.rqexe.setbuildable(tid)
1494 self.rqexe.tasks_notcovered.add(tid)
1495 self.rqexe.sqdone = True
1496 logger.info('Executing Tasks')
1497 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001498
1499 if self.state is runQueueRunning:
1500 retval = self.rqexe.execute()
1501
1502 if self.state is runQueueCleanUp:
1503 retval = self.rqexe.finish()
1504
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001505 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1506
1507 if build_done and self.dm_event_handler_registered:
1508 bb.event.remove(self.dm_event_handler_name, None)
1509 self.dm_event_handler_registered = False
1510
1511 if build_done and self.rqexe:
Brad Bishop08902b02019-08-20 09:16:51 -04001512 bb.parse.siggen.save_unitaskhashes()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001513 self.teardown_workers()
Brad Bishop96ff1982019-08-19 13:50:42 -04001514 if self.rqexe:
1515 if self.rqexe.stats.failed:
1516 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1517 else:
1518 # Let's avoid the word "failed" if nothing actually did
1519 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001520
1521 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001522 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001523
1524 if self.state is runQueueComplete:
1525 # All done
1526 return False
1527
1528 # Loop
1529 return retval
1530
1531 def execute_runqueue(self):
1532 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1533 try:
1534 return self._execute_runqueue()
1535 except bb.runqueue.TaskFailure:
1536 raise
1537 except SystemExit:
1538 raise
1539 except bb.BBHandledException:
1540 try:
1541 self.teardown_workers()
1542 except:
1543 pass
1544 self.state = runQueueComplete
1545 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001546 except Exception as err:
1547 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001548 try:
1549 self.teardown_workers()
1550 except:
1551 pass
1552 self.state = runQueueComplete
1553 raise
1554
1555 def finish_runqueue(self, now = False):
1556 if not self.rqexe:
1557 self.state = runQueueComplete
1558 return
1559
1560 if now:
1561 self.rqexe.finish_now()
1562 else:
1563 self.rqexe.finish()
1564
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001565 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001566 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Andrew Geissler5a43b432020-06-13 10:46:56 -05001567 mc = bb.runqueue.mc_from_tid(fn)
1568 the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001569 siggen = bb.parse.siggen
1570 dataCaches = self.rqdata.dataCaches
1571 siggen.dump_sigfn(fn, dataCaches, options)
1572
1573 def dump_signatures(self, options):
1574 fns = set()
1575 bb.note("Reparsing files to collect dependency data")
1576
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001577 for tid in self.rqdata.runtaskentries:
1578 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001579 fns.add(fn)
1580
1581 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1582 # We cannot use the real multiprocessing.Pool easily due to some local data
1583 # that can't be pickled. This is a cheap multi-process solution.
1584 launched = []
1585 while fns:
1586 if len(launched) < max_process:
1587 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1588 p.start()
1589 launched.append(p)
1590 for q in launched:
1591 # The finished processes are joined when calling is_alive()
1592 if not q.is_alive():
1593 launched.remove(q)
1594 for p in launched:
1595 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001596
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001597 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001598
1599 return
1600
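# The launched/join loop above is a deliberately cheap stand-in for
# multiprocessing.Pool (the per-file state isn't picklable). The same pattern in
# isolation, with a hypothetical work() callable:
#
#   from multiprocessing import Process
#
#   def run_bounded(items, work, max_process=4):
#       launched = []
#       while items:
#           if len(launched) < max_process:
#               p = Process(target=work, args=(items.pop(),))
#               p.start()
#               launched.append(p)
#           for p in launched[:]:
#               # finished children are reaped as a side effect of is_alive()
#               if not p.is_alive():
#                   launched.remove(p)
#       for p in launched:
#           p.join()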
1601 def print_diffscenetasks(self):
1602
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001603 noexec = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001604 tocheck = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001605
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001606 for tid in self.rqdata.runtaskentries:
1607 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1608 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001609
1610 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001611 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001612 continue
1613
Brad Bishop96ff1982019-08-19 13:50:42 -04001614 tocheck.add(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001615
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001616 valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001617
1618 # Tasks which are both setscene and noexec never care about dependencies
1619 # We therefore find tasks which are setscene and noexec and mark their
1620 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001621 for tid in noexec:
1622 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001623 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001624 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001625 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001626 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1627 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001628 continue
1629 hasnoexecparents = False
1630 break
1631 if hasnoexecparents:
1632 valid_new.add(dep)
1633
1634 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001635 for tid in self.rqdata.runtaskentries:
1636 if tid not in valid_new and tid not in noexec:
1637 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001638
1639 found = set()
1640 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001641 for tid in invalidtasks:
1642 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001643 while toprocess:
1644 next = set()
1645 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001646 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001647 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001648 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001649 if dep not in processed:
1650 processed.add(dep)
1651 next.add(dep)
1652 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001653 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001654 toprocess = set()
1655
1656 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001657 for tid in invalidtasks.difference(found):
1658 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001659
1660 if tasklist:
1661 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1662
1663 return invalidtasks.difference(found)
1664
1665 def write_diffscenetasks(self, invalidtasks):
1666
1667 # Define recursion callback
1668 def recursecb(key, hash1, hash2):
1669 hashes = [hash1, hash2]
1670 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1671
1672 recout = []
1673 if len(hashfiles) == 2:
1674 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
Brad Bishopc342db32019-05-15 21:57:59 -04001675 recout.extend(list(' ' + l for l in out2))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001676 else:
1677 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1678
1679 return recout
1680
1681
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001682 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001683 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1684 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001685 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001686 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1687 match = None
1688 for m in matches:
1689 if h in m:
1690 match = m
1691 if match is None:
1692 bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001693 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001694 if matches:
1695 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
Brad Bishop19323692019-04-05 15:28:33 -04001696 prevh = __find_sha256__.search(latestmatch).group(0)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001697 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1698 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1699
Brad Bishop96ff1982019-08-19 13:50:42 -04001700
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001701class RunQueueExecute:
1702
1703 def __init__(self, rq):
1704 self.rq = rq
1705 self.cooker = rq.cooker
1706 self.cfgData = rq.cfgData
1707 self.rqdata = rq.rqdata
1708
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001709 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1710 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001711
Brad Bishop96ff1982019-08-19 13:50:42 -04001712 self.sq_buildable = set()
1713 self.sq_running = set()
1714 self.sq_live = set()
1715
Brad Bishop08902b02019-08-20 09:16:51 -04001716 self.updated_taskhash_queue = []
1717 self.pending_migrations = set()
1718
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001719 self.runq_buildable = set()
1720 self.runq_running = set()
1721 self.runq_complete = set()
Andrew Geissler82c905d2020-04-13 13:39:40 -05001722 self.runq_tasksrun = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001723
1724 self.build_stamps = {}
1725 self.build_stamps2 = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001726 self.failed_tids = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001727 self.sq_deferred = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001728
1729 self.stampcache = {}
1730
Brad Bishop08902b02019-08-20 09:16:51 -04001731 self.holdoff_tasks = set()
Brad Bishopc68388fc2019-08-26 01:33:31 -04001732 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04001733 self.sqdone = False
1734
1735 self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
1736 self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
1737
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001738 for mc in rq.worker:
1739 rq.worker[mc].pipe.setrunqueueexec(self)
1740 for mc in rq.fakeworker:
1741 rq.fakeworker[mc].pipe.setrunqueueexec(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001742
1743 if self.number_tasks <= 0:
1744 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1745
Brad Bishop96ff1982019-08-19 13:50:42 -04001746 # List of setscene tasks which we've covered
1747 self.scenequeue_covered = set()
1748 # List of tasks which are covered (including setscene ones)
1749 self.tasks_covered = set()
1750 self.tasks_scenequeue_done = set()
1751 self.scenequeue_notcovered = set()
1752 self.tasks_notcovered = set()
1753 self.scenequeue_notneeded = set()
1754
Brad Bishop08902b02019-08-20 09:16:51 -04001755 # We can't skip specified target tasks which aren't setscene tasks
1756 self.cantskip = set(self.rqdata.target_tids)
1757 self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
1758 self.cantskip.intersection_update(self.rqdata.runtaskentries)
Brad Bishop96ff1982019-08-19 13:50:42 -04001759
1760 schedulers = self.get_schedulers()
1761 for scheduler in schedulers:
1762 if self.scheduler == scheduler.name:
1763 self.sched = scheduler(self, self.rqdata)
1764 logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
1765 break
1766 else:
1767 bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
1768 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1769
Brad Bishop08902b02019-08-20 09:16:51 -04001770 #if len(self.rqdata.runq_setscene_tids) > 0:
1771 self.sqdata = SQData()
1772 build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001773
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001774 def runqueue_process_waitpid(self, task, status):
1775
1776 # self.build_stamps[pid] may not exist when use shared work directory.
1777 if task in self.build_stamps:
1778 self.build_stamps2.remove(self.build_stamps[task])
1779 del self.build_stamps[task]
1780
Brad Bishop96ff1982019-08-19 13:50:42 -04001781 if task in self.sq_live:
1782 if status != 0:
1783 self.sq_task_fail(task, status)
1784 else:
1785 self.sq_task_complete(task)
1786 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001787 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001788 if status != 0:
1789 self.task_fail(task, status)
1790 else:
1791 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001792 return True
1793
1794 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001795 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001796 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001797 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1798 self.rq.worker[mc].process.stdin.flush()
1799 except IOError:
1800 # worker must have died?
1801 pass
1802 for mc in self.rq.fakeworker:
1803 try:
1804 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1805 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001806 except IOError:
1807 # worker must have died?
1808 pass
1809
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001810 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001811 self.rq.state = runQueueFailed
1812 return
1813
1814 self.rq.state = runQueueComplete
1815 return
1816
1817 def finish(self):
1818 self.rq.state = runQueueCleanUp
1819
Brad Bishop96ff1982019-08-19 13:50:42 -04001820 active = self.stats.active + self.sq_stats.active
1821 if active > 0:
1822 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001823 self.rq.read_workers()
1824 return self.rq.active_fds()
1825
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001826 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001827 self.rq.state = runQueueFailed
1828 return True
1829
1830 self.rq.state = runQueueComplete
1831 return True
1832
Brad Bishop96ff1982019-08-19 13:50:42 -04001833 # Used by setscene only
1834 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001835 if not self.rq.depvalidate:
1836 return False
1837
Brad Bishop08902b02019-08-20 09:16:51 -04001838 # Must not edit parent data
1839 taskdeps = set(taskdeps)
1840
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001841 taskdata = {}
1842 taskdeps.add(task)
1843 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001844 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1845 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001846 taskdata[dep] = [pn, taskname, fn]
1847 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001848 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001849 valid = bb.utils.better_eval(call, locs)
1850 return valid
1851
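# check_dependencies() likewise delegates the policy to the metadata:
# BB_SETSCENE_DEPVALID names a function (setscene_depvalid in OpenEmbedded's
# sstate.bbclass) which decides whether a setscene task is really required by
# the tasks related to it. A hypothetical sketch of the interface only; the real
# policy is far more involved:
#
#   def my_depvalid(task, taskdata, notneeded, d, **kwargs):
#       # taskdata maps each related tid to [pn, taskname, fn]. Return True when
#       # none of them genuinely needs 'task' to have run, i.e. the setscene
#       # task can be skipped.
#       return False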
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001852 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001853 active = self.stats.active + self.sq_stats.active
1854 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001855 return can_start
1856
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001857 def get_schedulers(self):
1858 schedulers = set(obj for obj in globals().values()
1859 if type(obj) is type and
1860 issubclass(obj, RunQueueScheduler))
1861
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001862 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001863 if user_schedulers:
1864 for sched in user_schedulers.split():
1865 if not "." in sched:
1866 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1867 continue
1868
1869 modname, name = sched.rsplit(".", 1)
1870 try:
1871 module = __import__(modname, fromlist=(name,))
1872 except ImportError as exc:
1873 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1874 raise SystemExit(1)
1875 else:
1876 schedulers.add(getattr(module, name))
1877 return schedulers
1878
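# get_schedulers() will pick up any RunQueueScheduler subclass named in
# BB_SCHEDULERS ("module.ClassName") and select it when BB_SCHEDULER matches its
# name. A minimal, hypothetical custom scheduler sketch (module and class names
# are made up); like the built-in "speed" scheduler it only reorders prio_map
# and leaves task selection to the base class:
#
#   # mymodule.py, enabled with BB_SCHEDULERS = "mymodule.AlphaScheduler"
#   # and BB_SCHEDULER = "alpha"
#   from bb.runqueue import RunQueueScheduler
#
#   class AlphaScheduler(RunQueueScheduler):
#       """Prioritise buildable tasks in plain alphabetical tid order."""
#       name = "alpha"
#
#       def __init__(self, runqueue, rqdata):
#           RunQueueScheduler.__init__(self, runqueue, rqdata)
#           self.prio_map = sorted(self.rqdata.runtaskentries)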
1879 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001880 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001881 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001882
1883 def task_completeoutright(self, task):
1884 """
1885 Mark a task as completed
1886 Look at the reverse dependencies and mark any task with
1887 completed dependencies as buildable
1888 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001889 self.runq_complete.add(task)
1890 for revdep in self.rqdata.runtaskentries[task].revdeps:
1891 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001892 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001893 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001894 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001895 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001896 for dep in self.rqdata.runtaskentries[revdep].depends:
1897 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001898 alldeps = False
1899 break
1900 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001901 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001902 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001903
1904 def task_complete(self, task):
1905 self.stats.taskCompleted()
1906 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1907 self.task_completeoutright(task)
Andrew Geissler82c905d2020-04-13 13:39:40 -05001908 self.runq_tasksrun.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001909
1910 def task_fail(self, task, exitcode):
1911 """
1912 Called when a task has failed
1913 Updates the state engine with the failure
1914 """
1915 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001916 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001917 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001918 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001919 self.rq.state = runQueueCleanUp
1920
1921 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001922 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001923 self.setbuildable(task)
1924 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1925 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001926 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001927 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001928
Brad Bishop08902b02019-08-20 09:16:51 -04001929 def summarise_scenequeue_errors(self):
1930 err = False
1931 if not self.sqdone:
1932 logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
1933 completeevent = sceneQueueComplete(self.sq_stats, self.rq)
1934 bb.event.fire(completeevent, self.cfgData)
1935 if self.sq_deferred:
1936 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
1937 err = True
1938 if self.updated_taskhash_queue:
1939 logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
1940 err = True
1941 if self.holdoff_tasks:
1942 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
1943 err = True
1944
1945 for tid in self.rqdata.runq_setscene_tids:
1946 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
1947 err = True
1948 logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
1949 if tid not in self.sq_buildable:
1950 err = True
1951 logger.error("Setscene Task %s was never marked as buildable" % tid)
1952 if tid not in self.sq_running:
1953 err = True
1954 logger.error("Setscene Task %s was never marked as running" % tid)
1955
1956 for x in self.rqdata.runtaskentries:
1957 if x not in self.tasks_covered and x not in self.tasks_notcovered:
1958 logger.error("Task %s was never moved from the setscene queue" % x)
1959 err = True
1960 if x not in self.tasks_scenequeue_done:
1961 logger.error("Task %s was never processed by the setscene code" % x)
1962 err = True
1963 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
1964 logger.error("Task %s was never marked as buildable by the setscene code" % x)
1965 err = True
1966 return err
1967
1968
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001969 def execute(self):
1970 """
Brad Bishop96ff1982019-08-19 13:50:42 -04001971 Run the tasks in a queue prepared by RunQueueData.prepare()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001972 """
1973
1974 self.rq.read_workers()
Andrew Geissler82c905d2020-04-13 13:39:40 -05001975 if self.updated_taskhash_queue or self.pending_migrations:
1976 self.process_possible_migrations()
1977
1978 if not hasattr(self, "sorted_setscene_tids"):
1979 # Don't want to sort this set every execution
1980 self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001981
Brad Bishop96ff1982019-08-19 13:50:42 -04001982 task = None
1983 if not self.sqdone and self.can_start_task():
1984 # Find the next setscene to run
Andrew Geissler82c905d2020-04-13 13:39:40 -05001985 for nexttask in self.sorted_setscene_tids:
Brad Bishop96ff1982019-08-19 13:50:42 -04001986 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
1987 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
1988 if nexttask not in self.rqdata.target_tids:
1989 logger.debug(2, "Skipping setscene for task %s" % nexttask)
1990 self.sq_task_skip(nexttask)
1991 self.scenequeue_notneeded.add(nexttask)
1992 if nexttask in self.sq_deferred:
1993 del self.sq_deferred[nexttask]
1994 return True
Brad Bishop08902b02019-08-20 09:16:51 -04001995 # If covered tasks are running, need to wait for them to complete
1996 for t in self.sqdata.sq_covered_tasks[nexttask]:
1997 if t in self.runq_running and t not in self.runq_complete:
1998 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04001999 if nexttask in self.sq_deferred:
2000 if self.sq_deferred[nexttask] not in self.runq_complete:
2001 continue
2002 logger.debug(1, "Task %s no longer deferred" % nexttask)
2003 del self.sq_deferred[nexttask]
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002004 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
Brad Bishop96ff1982019-08-19 13:50:42 -04002005 if not valid:
2006 logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
2007 self.sq_task_failoutright(nexttask)
2008 return True
2009 else:
2010 self.sqdata.outrightfail.remove(nexttask)
2011 if nexttask in self.sqdata.outrightfail:
2012 logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
2013 self.sq_task_failoutright(nexttask)
2014 return True
2015 if nexttask in self.sqdata.unskippable:
2016 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
2017 task = nexttask
2018 break
2019 if task is not None:
2020 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
2021 taskname = taskname + "_setscene"
2022 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
2023 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
2024 self.sq_task_failoutright(task)
2025 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002026
Brad Bishop96ff1982019-08-19 13:50:42 -04002027 if self.cooker.configuration.force:
2028 if task in self.rqdata.target_tids:
2029 self.sq_task_failoutright(task)
2030 return True
2031
2032 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2033 logger.debug(2, 'Setscene stamp for task %s is current, so skip it and its dependencies', task)
2034 self.sq_task_skip(task)
2035 return True
2036
2037 if self.cooker.configuration.skipsetscene:
2038 logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
2039 self.sq_task_failoutright(task)
2040 return True
2041
2042 startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
2043 bb.event.fire(startevent, self.cfgData)
2044
2045 taskdepdata = self.sq_build_taskdepdata(task)
2046
2047 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2048 taskhash = self.rqdata.get_task_hash(task)
2049 unihash = self.rqdata.get_task_unihash(task)
2050 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
2051 if not mc in self.rq.fakeworker:
2052 self.rq.start_fakeworker(self, mc)
Andrew Geissler5a43b432020-06-13 10:46:56 -05002053 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
Brad Bishop96ff1982019-08-19 13:50:42 -04002054 self.rq.fakeworker[mc].process.stdin.flush()
2055 else:
Andrew Geissler5a43b432020-06-13 10:46:56 -05002056 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
Brad Bishop96ff1982019-08-19 13:50:42 -04002057 self.rq.worker[mc].process.stdin.flush()
2058
2059 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2060 self.build_stamps2.append(self.build_stamps[task])
2061 self.sq_running.add(task)
2062 self.sq_live.add(task)
2063 self.sq_stats.taskActive()
2064 if self.can_start_task():
2065 return True
2066
Brad Bishopc68388fc2019-08-26 01:33:31 -04002067 self.update_holdofftasks()
2068
Brad Bishop08902b02019-08-20 09:16:51 -04002069 if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002070 hashequiv_logger.verbose("Setscene tasks completed")
Brad Bishop96ff1982019-08-19 13:50:42 -04002071
Brad Bishop08902b02019-08-20 09:16:51 -04002072 err = self.summarise_scenequeue_errors()
Brad Bishop96ff1982019-08-19 13:50:42 -04002073 if err:
2074 self.rq.state = runQueueFailed
2075 return True
2076
2077 if self.cooker.configuration.setsceneonly:
2078 self.rq.state = runQueueComplete
2079 return True
2080 self.sqdone = True
2081
2082 if self.stats.total == 0:
2083 # nothing to do
2084 self.rq.state = runQueueComplete
2085 return True
2086
2087 if self.cooker.configuration.setsceneonly:
2088 task = None
2089 else:
2090 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002091 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002092 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002093
Brad Bishop96ff1982019-08-19 13:50:42 -04002094 if self.rqdata.setscenewhitelist is not None:
2095 if self.check_setscenewhitelist(task):
2096 self.task_fail(task, "setscene whitelist")
2097 return True
2098
2099 if task in self.tasks_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002100 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002101 self.task_skip(task, "covered")
2102 return True
2103
2104 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002105 logger.debug(2, "Stamp current task %s", task)
2106
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002107 self.task_skip(task, "existing")
Andrew Geissler82c905d2020-04-13 13:39:40 -05002108 self.runq_tasksrun.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002109 return True
2110
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002111 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002112 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2113 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2114 noexec=True)
2115 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002116 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002117 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002118 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002119 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002120 self.task_complete(task)
2121 return True
2122 else:
2123 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2124 bb.event.fire(startevent, self.cfgData)
2125
2126 taskdepdata = self.build_taskdepdata(task)
2127
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002128 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002129 taskhash = self.rqdata.get_task_hash(task)
2130 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002131 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002132 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002133 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002134 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002135 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002136 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002137 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002138 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002139 return True
Andrew Geissler5a43b432020-06-13 10:46:56 -05002140 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002141 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002142 else:
Andrew Geissler5a43b432020-06-13 10:46:56 -05002143 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002144 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002145
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002146 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2147 self.build_stamps2.append(self.build_stamps[task])
2148 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002149 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002150 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002151 return True
2152
Brad Bishop96ff1982019-08-19 13:50:42 -04002153 if self.stats.active > 0 or self.sq_stats.active > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002154 self.rq.read_workers()
2155 return self.rq.active_fds()
2156
Brad Bishop96ff1982019-08-19 13:50:42 -04002157 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2158 if self.sq_deferred:
2159 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2160 logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s" % tid)
2161 self.sq_task_failoutright(tid)
2162 return True
2163
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002164 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002165 self.rq.state = runQueueFailed
2166 return True
2167
2168 # Sanity Checks
Brad Bishop08902b02019-08-20 09:16:51 -04002169 err = self.summarise_scenequeue_errors()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002170 for task in self.rqdata.runtaskentries:
2171 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002172 logger.error("Task %s never buildable!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002173 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002174 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002175 logger.error("Task %s never ran!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002176 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002177 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002178 logger.error("Task %s never completed!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002179 err = True
2180
2181 if err:
2182 self.rq.state = runQueueFailed
2183 else:
2184 self.rq.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002185
2186 return True
2187
Brad Bishopc68388fc2019-08-26 01:33:31 -04002188 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002189 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002190 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002191 thismc = mc_from_tid(dep)
2192 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002193 continue
2194 ret.add(dep)
2195 return ret
2196
Brad Bishopa34c0302019-09-23 22:34:48 -04002197 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
Andrew Geissler99467da2019-02-25 18:54:23 -06002198 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002199 def build_taskdepdata(self, task):
2200 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002201 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002202 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002203 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002204 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002205 while next:
2206 additional = []
2207 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002208 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2209 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2210 deps = self.rqdata.runtaskentries[revdep].depends
2211 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002212 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002213 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002214 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002215 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002216 for revdep2 in deps:
2217 if revdep2 not in taskdepdata:
2218 additional.append(revdep2)
2219 next = additional
2220
2221 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2222 return taskdepdata
2223
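# Shape of the structure handed to the worker, shown with purely hypothetical
# values (one entry per task in the transitive, same-multiconfig dependency set):
#
#   taskdepdata = {
#       "/path/to/acl.bb:do_configure": [
#           "acl",                            # pn
#           "do_configure",                   # taskname
#           "/path/to/acl.bb",                # fn
#           {"/path/to/acl.bb:do_patch"},     # depends
#           ["acl", "libacl"],                # provides
#           "3f7b...",                        # taskhash
#           "3f7b...",                        # unihash
#       ],
#   }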
Brad Bishop08902b02019-08-20 09:16:51 -04002224 def update_holdofftasks(self):
Brad Bishopc68388fc2019-08-26 01:33:31 -04002225
2226 if not self.holdoff_need_update:
2227 return
2228
2229 notcovered = set(self.scenequeue_notcovered)
2230 notcovered |= self.cantskip
2231 for tid in self.scenequeue_notcovered:
2232 notcovered |= self.sqdata.sq_covered_tasks[tid]
2233 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2234 notcovered.intersection_update(self.tasks_scenequeue_done)
2235
2236 covered = set(self.scenequeue_covered)
2237 for tid in self.scenequeue_covered:
2238 covered |= self.sqdata.sq_covered_tasks[tid]
2239 covered.difference_update(notcovered)
2240 covered.intersection_update(self.tasks_scenequeue_done)
2241
2242 for tid in notcovered | covered:
2243 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2244 self.setbuildable(tid)
2245 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2246 self.setbuildable(tid)
2247
2248 self.tasks_covered = covered
2249 self.tasks_notcovered = notcovered
2250
Brad Bishop08902b02019-08-20 09:16:51 -04002251 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002252
Brad Bishop08902b02019-08-20 09:16:51 -04002253 for tid in self.rqdata.runq_setscene_tids:
2254 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2255 self.holdoff_tasks.add(tid)
2256
2257 for tid in self.holdoff_tasks.copy():
2258 for dep in self.sqdata.sq_covered_tasks[tid]:
2259 if dep not in self.runq_complete:
2260 self.holdoff_tasks.add(dep)
2261
Brad Bishopc68388fc2019-08-26 01:33:31 -04002262 self.holdoff_need_update = False
2263
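# process_possible_migrations() below needs every task transitively affected by
# a changed hash. The walk it performs is a standard frontier expansion over
# revdeps; the same idea in isolation (hypothetical 'revdeps' mapping of
# tid -> set of tasks depending on it):
#
#   def transitive_revdeps(seeds, revdeps):
#       total = set()
#       frontier = set()
#       for s in seeds:
#           frontier |= revdeps[s]
#       while frontier:
#           total |= frontier
#           nxt = set()
#           for tid in frontier:
#               nxt |= revdeps[tid]
#           frontier = nxt - total
#       return total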
Brad Bishop08902b02019-08-20 09:16:51 -04002264 def process_possible_migrations(self):
2265
2266 changed = set()
Andrew Geissler82c905d2020-04-13 13:39:40 -05002267 toprocess = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002268 for tid, unihash in self.updated_taskhash_queue.copy():
2269 if tid in self.runq_running and tid not in self.runq_complete:
2270 continue
2271
2272 self.updated_taskhash_queue.remove((tid, unihash))
2273
2274 if unihash != self.rqdata.runtaskentries[tid].unihash:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002275 hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash))
Brad Bishop08902b02019-08-20 09:16:51 -04002276 self.rqdata.runtaskentries[tid].unihash = unihash
2277 bb.parse.siggen.set_unihash(tid, unihash)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002278 toprocess.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002279
Andrew Geissler82c905d2020-04-13 13:39:40 -05002280 # Work out all tasks which depend upon these
2281 total = set()
2282 next = set()
2283 for p in toprocess:
2284 next |= self.rqdata.runtaskentries[p].revdeps
2285 while next:
2286 current = next.copy()
2287 total = total | next
2288 next = set()
2289 for ntid in current:
2290 next |= self.rqdata.runtaskentries[ntid].revdeps
2291 next.difference_update(total)
Brad Bishop08902b02019-08-20 09:16:51 -04002292
Andrew Geissler82c905d2020-04-13 13:39:40 -05002293 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2294 next = set()
2295 for p in total:
2296 if len(self.rqdata.runtaskentries[p].depends) == 0:
2297 next.add(p)
2298 elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
2299 next.add(p)
2300
2301 # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
2302 while next:
2303 current = next.copy()
2304 next = set()
2305 for tid in current:
2306 if len(self.rqdata.runtaskentries[tid].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2307 continue
2308 orighash = self.rqdata.runtaskentries[tid].hash
Andrew Geissler5a43b432020-06-13 10:46:56 -05002309 dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
2310 newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002311 origuni = self.rqdata.runtaskentries[tid].unihash
2312 newuni = bb.parse.siggen.get_unihash(tid)
2313 # FIXME, need to check it can come from sstate at all for determinism?
2314 remapped = False
2315 if newuni == origuni:
2316 # Nothing to do, we match, skip code below
2317 remapped = True
2318 elif tid in self.scenequeue_covered or tid in self.sq_live:
2319 # Already ran this setscene task or it is running. Report the new taskhash
2320 bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches)
2321 hashequiv_logger.verbose("Already covered setscene for %s so ignoring rehash (remap)" % (tid))
2322 remapped = True
2323
2324 if not remapped:
2325 #logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
2326 self.rqdata.runtaskentries[tid].hash = newhash
2327 self.rqdata.runtaskentries[tid].unihash = newuni
2328 changed.add(tid)
2329
2330 next |= self.rqdata.runtaskentries[tid].revdeps
2331 total.remove(tid)
2332 next.intersection_update(total)
Brad Bishop08902b02019-08-20 09:16:51 -04002333
2334 if changed:
2335 for mc in self.rq.worker:
2336 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2337 for mc in self.rq.fakeworker:
2338 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2339
Andrew Geissler82c905d2020-04-13 13:39:40 -05002340 hashequiv_logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))
Brad Bishop08902b02019-08-20 09:16:51 -04002341
2342 for tid in changed:
2343 if tid not in self.rqdata.runq_setscene_tids:
2344 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002345 if tid not in self.pending_migrations:
2346 self.pending_migrations.add(tid)
2347
Andrew Geissler82c905d2020-04-13 13:39:40 -05002348 update_tasks = []
Brad Bishop08902b02019-08-20 09:16:51 -04002349 for tid in self.pending_migrations.copy():
Andrew Geissler82c905d2020-04-13 13:39:40 -05002350 if tid in self.runq_running or tid in self.sq_live:
Brad Bishop6dbb3162019-11-25 09:41:34 -05002351 # Too late, task already running, not much we can do now
2352 self.pending_migrations.remove(tid)
2353 continue
2354
Brad Bishop08902b02019-08-20 09:16:51 -04002355 valid = True
2356 # Check no tasks this covers are running
2357 for dep in self.sqdata.sq_covered_tasks[tid]:
2358 if dep in self.runq_running and dep not in self.runq_complete:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002359 hashequiv_logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
Brad Bishop08902b02019-08-20 09:16:51 -04002360 valid = False
2361 break
2362 if not valid:
2363 continue
2364
2365 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002366 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002367
2368 if tid in self.tasks_scenequeue_done:
2369 self.tasks_scenequeue_done.remove(tid)
2370 for dep in self.sqdata.sq_covered_tasks[tid]:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002371 if dep in self.runq_complete and dep not in self.runq_tasksrun:
2372 bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep)
2373 self.failed_tids.append(tid)
2374 self.rq.state = runQueueCleanUp
2375 return
2376
Brad Bishop08902b02019-08-20 09:16:51 -04002377 if dep not in self.runq_complete:
2378 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2379 self.tasks_scenequeue_done.remove(dep)
2380
2381 if tid in self.sq_buildable:
2382 self.sq_buildable.remove(tid)
2383 if tid in self.sq_running:
2384 self.sq_running.remove(tid)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002385 harddepfail = False
2386 for t in self.sqdata.sq_harddeps:
2387 if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
2388 harddepfail = True
2389 break
2390 if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
Brad Bishop08902b02019-08-20 09:16:51 -04002391 if tid not in self.sq_buildable:
2392 self.sq_buildable.add(tid)
2393 if len(self.sqdata.sq_revdeps[tid]) == 0:
2394 self.sq_buildable.add(tid)
2395
2396 if tid in self.sqdata.outrightfail:
2397 self.sqdata.outrightfail.remove(tid)
2398 if tid in self.scenequeue_notcovered:
2399 self.scenequeue_notcovered.remove(tid)
2400 if tid in self.scenequeue_covered:
2401 self.scenequeue_covered.remove(tid)
2402 if tid in self.scenequeue_notneeded:
2403 self.scenequeue_notneeded.remove(tid)
2404
2405 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2406 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2407
2408 if tid in self.stampcache:
2409 del self.stampcache[tid]
2410
2411 if tid in self.build_stamps:
2412 del self.build_stamps[tid]
2413
Andrew Geissler82c905d2020-04-13 13:39:40 -05002414 update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))
2415
2416 if update_tasks:
Brad Bishop08902b02019-08-20 09:16:51 -04002417 self.sqdone = False
Andrew Geissler82c905d2020-04-13 13:39:40 -05002418 update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
2419
2420 for (tid, harddepfail, origvalid) in update_tasks:
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002421 if tid in self.sqdata.valid and not origvalid:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002422 hashequiv_logger.verbose("Setscene task %s became valid" % tid)
2423 if harddepfail:
2424 self.sq_task_failoutright(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002425
2426 if changed:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002427 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002428
Brad Bishop96ff1982019-08-19 13:50:42 -04002429 def scenequeue_updatecounters(self, task, fail=False):
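        """
        Called when a setscene task completes or fails. Mark dependent setscene
        tasks buildable once all their reverse dependencies are resolved,
        propagate hard dependency failures, and record which non-setscene tasks
        are now considered done by the scenequeue.
        """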
Brad Bishop08902b02019-08-20 09:16:51 -04002430
2431 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002432 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002433 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002434 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002435 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002436 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2437 if dep not in self.sq_buildable:
2438 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002439
Brad Bishop96ff1982019-08-19 13:50:42 -04002440 next = set([task])
2441 while next:
2442 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002443 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002444 self.tasks_scenequeue_done.add(t)
2445 # Look down the dependency chain for non-setscene things which this task depends on
2446 # and mark as 'done'
2447 for dep in self.rqdata.runtaskentries[t].depends:
2448 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2449 continue
2450 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2451 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002452 next = new
2453
Brad Bishopc68388fc2019-08-26 01:33:31 -04002454 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002455
2456 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002457 """
2458 Mark a task as completed
2459 Look at the reverse dependencies and mark any task with
2460 completed dependencies as buildable
2461 """
2462
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002463 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002464 self.scenequeue_covered.add(task)
2465 self.scenequeue_updatecounters(task)
2466
Brad Bishop96ff1982019-08-19 13:50:42 -04002467 def sq_check_taskfail(self, task):
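        """
        When BB_SETSCENE_ENFORCE is active, a setscene failure for a task not
        covered by the whitelist cannot be recovered by running the real task,
        so report the failure and move the runqueue into cleanup.
        """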
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002468 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002469 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002470 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2471 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002472 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2473 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2474 self.rq.state = runQueueCleanUp
2475
Brad Bishop96ff1982019-08-19 13:50:42 -04002476 def sq_task_complete(self, task):
2477 self.sq_stats.taskCompleted()
2478 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2479 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002480
Brad Bishop96ff1982019-08-19 13:50:42 -04002481 def sq_task_fail(self, task, result):
2482 self.sq_stats.taskFailed()
2483 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002484 self.scenequeue_notcovered.add(task)
2485 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002486 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002487
Brad Bishop96ff1982019-08-19 13:50:42 -04002488 def sq_task_failoutright(self, task):
2489 self.sq_running.add(task)
2490 self.sq_buildable.add(task)
2491 self.sq_stats.taskSkipped()
2492 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002493 self.scenequeue_notcovered.add(task)
2494 self.scenequeue_updatecounters(task, True)
2495
Brad Bishop96ff1982019-08-19 13:50:42 -04002496 def sq_task_skip(self, task):
2497 self.sq_running.add(task)
2498 self.sq_buildable.add(task)
2499 self.sq_task_completeoutright(task)
2500 self.sq_stats.taskSkipped()
2501 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002502
Brad Bishop96ff1982019-08-19 13:50:42 -04002503 def sq_build_taskdepdata(self, task):
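        """
        Build the taskdepdata structure for a setscene task, mapping each tid to
        [pn, taskname, fn, deps, provides, taskhash, unihash] for the task and
        the setscene tasks it transitively depends upon via [depends] entries.
        """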
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002504 def getsetscenedeps(tid):
2505 deps = set()
2506 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2507 realtid = tid + "_setscene"
2508 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2509 for (depname, idependtask) in idepends:
2510 if depname not in self.rqdata.taskData[mc].build_targets:
2511 continue
2512
2513 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2514 if depfn is None:
2515 continue
2516 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2517 deps.add(deptid)
2518 return deps
2519
2520 taskdepdata = {}
2521 next = getsetscenedeps(task)
2522 next.add(task)
2523 while next:
2524 additional = []
2525 for revdep in next:
2526 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2527 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2528 deps = getsetscenedeps(revdep)
2529 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2530 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002531 unihash = self.rqdata.runtaskentries[revdep].unihash
2532 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002533 for revdep2 in deps:
2534 if revdep2 not in taskdepdata:
2535 additional.append(revdep2)
2536 next = additional
2537
2538 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2539 return taskdepdata
2540
Brad Bishop96ff1982019-08-19 13:50:42 -04002541 def check_setscenewhitelist(self, tid):
2542 # Check a task which is about to run against the setscene enforcement whitelist
2543 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2544 # Ignore covered tasks
2545 if tid in self.tasks_covered:
2546 return False
2547 # Ignore stamped tasks
2548 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2549 return False
2550 # Ignore noexec tasks
2551 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2552 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2553 return False
2554
2555 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2556 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2557 if tid in self.rqdata.runq_setscene_tids:
2558 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2559 else:
2560 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002561 for t in self.scenequeue_notcovered:
2562 msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)
Brad Bishop96ff1982019-08-19 13:50:42 -04002563 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2564 return True
2565 return False
2566
2567class SQData(object):
2568 def __init__(self):
2569 # SceneQueue dependencies
2570 self.sq_deps = {}
2571 # SceneQueue reverse dependencies
2572 self.sq_revdeps = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04002573 # Injected inter-setscene task dependencies
2574 self.sq_harddeps = {}
2575 # Cache of stamp files so duplicates can't run in parallel
2576 self.stamps = {}
2577 # Setscene tasks directly depended upon by the build
2578 self.unskippable = set()
2579 # List of setscene tasks which aren't present
2580 self.outrightfail = set()
2581 # A list of normal tasks a setscene task covers
2582 self.sq_covered_tasks = {}
2583
2584def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
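    """
    Populate sqdata with the collapsed setscene dependency graph derived from
    the full runqueue: per-setscene-task dependencies and reverse dependencies,
    the normal tasks each setscene task covers, unskippable tasks and stamps.
    """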
2585
2586 sq_revdeps = {}
2587 sq_revdeps_squash = {}
2588 sq_collated_deps = {}
2589
2590 # We need to construct a dependency graph for the setscene functions. Intermediate
2591 # dependencies between the setscene tasks only complicate the code. This code
2592 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2593 # only containing the setscene functions.
2594
2595 rqdata.init_progress_reporter.next_stage()
2596
2597 # First process the chains up to the first setscene task.
2598 endpoints = {}
2599 for tid in rqdata.runtaskentries:
2600 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2601 sq_revdeps_squash[tid] = set()
2602 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2603 #bb.warn("Added endpoint %s" % (tid))
2604 endpoints[tid] = set()
2605
2606 rqdata.init_progress_reporter.next_stage()
2607
2608 # Secondly process the chains between setscene tasks.
2609 for tid in rqdata.runq_setscene_tids:
2610 sq_collated_deps[tid] = set()
2611 #bb.warn("Added endpoint 2 %s" % (tid))
2612 for dep in rqdata.runtaskentries[tid].depends:
2613 if tid in sq_revdeps[dep]:
2614 sq_revdeps[dep].remove(tid)
2615 if dep not in endpoints:
2616 endpoints[dep] = set()
2617 #bb.warn(" Added endpoint 3 %s" % (dep))
2618 endpoints[dep].add(tid)
2619
2620 rqdata.init_progress_reporter.next_stage()
2621
2622 def process_endpoints(endpoints):
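        # Walk backwards from each endpoint, collapsing chains of non-setscene
        # tasks and accumulating, for every task, the set of setscene tasks
        # which cover it (recorded in sq_collated_deps / sq_revdeps_squash).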
2623 newendpoints = {}
2624 for point, task in endpoints.items():
2625 tasks = set()
2626 if task:
2627 tasks |= task
2628 if sq_revdeps_squash[point]:
2629 tasks |= sq_revdeps_squash[point]
2630 if point not in rqdata.runq_setscene_tids:
2631 for t in tasks:
2632 sq_collated_deps[t].add(point)
2633 sq_revdeps_squash[point] = set()
2634 if point in rqdata.runq_setscene_tids:
2635 sq_revdeps_squash[point] = tasks
2636 tasks = set()
2637 continue
2638 for dep in rqdata.runtaskentries[point].depends:
2639 if point in sq_revdeps[dep]:
2640 sq_revdeps[dep].remove(point)
2641 if tasks:
2642 sq_revdeps_squash[dep] |= tasks
2643 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2644 newendpoints[dep] = task
2645 if len(newendpoints) != 0:
2646 process_endpoints(newendpoints)
2647
2648 process_endpoints(endpoints)
2649
2650 rqdata.init_progress_reporter.next_stage()
2651
Brad Bishop08902b02019-08-20 09:16:51 -04002652 # Build a list of tasks which are "unskippable"
2653 # These are direct endpoints referenced by the build, up to and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002654 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2655 new = True
2656 for tid in rqdata.runtaskentries:
2657 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2658 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002659 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002660 while new:
2661 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002662 orig = sqdata.unskippable.copy()
2663 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002664 if tid in rqdata.runq_setscene_tids:
2665 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002666 if len(rqdata.runtaskentries[tid].depends) == 0:
2667 # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002668 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002669 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002670 if sqdata.unskippable != orig:
2671 new = True
2672
2673 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002674
2675 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2676
2677 # Sanity check all dependencies could be changed to setscene task references
2678 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2679 if tid in rqdata.runq_setscene_tids:
2680 pass
2681 elif len(sq_revdeps_squash[tid]) != 0:
2682 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2683 else:
2684 del sq_revdeps_squash[tid]
2685 rqdata.init_progress_reporter.update(taskcounter)
2686
2687 rqdata.init_progress_reporter.next_stage()
2688
2689 # Resolve setscene inter-task dependencies
2690 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2691 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2692 for tid in rqdata.runq_setscene_tids:
2693 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2694 realtid = tid + "_setscene"
2695 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2696 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2697 for (depname, idependtask) in idepends:
2698
2699 if depname not in rqdata.taskData[mc].build_targets:
2700 continue
2701
2702 depfn = rqdata.taskData[mc].build_targets[depname][0]
2703 if depfn is None:
2704 continue
2705 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2706 if deptid not in rqdata.runtaskentries:
2707 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2708
2709 if not deptid in sqdata.sq_harddeps:
2710 sqdata.sq_harddeps[deptid] = set()
2711 sqdata.sq_harddeps[deptid].add(tid)
2712
2713 sq_revdeps_squash[tid].add(deptid)
2714 # Have to zero this to avoid circular dependencies
2715 sq_revdeps_squash[deptid] = set()
2716
2717 rqdata.init_progress_reporter.next_stage()
2718
2719 for task in sqdata.sq_harddeps:
2720 for dep in sqdata.sq_harddeps[task]:
2721 sq_revdeps_squash[dep].add(task)
2722
2723 rqdata.init_progress_reporter.next_stage()
2724
2725 #for tid in sq_revdeps_squash:
2726 # data = ""
2727 # for dep in sq_revdeps_squash[tid]:
2728 # data = data + "\n %s" % dep
2729 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2730
2731 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002732 sqdata.sq_covered_tasks = sq_collated_deps
2733
2734 # Build reverse version of revdeps to populate deps structure
2735 for tid in sqdata.sq_revdeps:
2736 sqdata.sq_deps[tid] = set()
2737 for tid in sqdata.sq_revdeps:
2738 for dep in sqdata.sq_revdeps[tid]:
2739 sqdata.sq_deps[dep].add(tid)
2740
2741 rqdata.init_progress_reporter.next_stage()
2742
Brad Bishop00e122a2019-10-05 11:10:57 -04002743 sqdata.multiconfigs = set()
Brad Bishop96ff1982019-08-19 13:50:42 -04002744 for tid in sqdata.sq_revdeps:
Brad Bishop00e122a2019-10-05 11:10:57 -04002745 sqdata.multiconfigs.add(mc_from_tid(tid))
Brad Bishop96ff1982019-08-19 13:50:42 -04002746 if len(sqdata.sq_revdeps[tid]) == 0:
2747 sqrq.sq_buildable.add(tid)
2748
2749 rqdata.init_progress_reporter.finish()
2750
Brad Bishop00e122a2019-10-05 11:10:57 -04002751 sqdata.noexec = set()
2752 sqdata.stamppresent = set()
2753 sqdata.valid = set()
Brad Bishop96ff1982019-08-19 13:50:42 -04002754
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002755 update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
Brad Bishop00e122a2019-10-05 11:10:57 -04002756
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002757def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
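    """
    Re-examine the given setscene task ids: skip those which are noexec or
    already have valid stamps, check the remainder against setscene hash
    validation, mark unavailable ones as outright failures and defer tasks
    whose hashes duplicate another pending setscene task.
    """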
Brad Bishop00e122a2019-10-05 11:10:57 -04002758
2759 tocheck = set()
2760
2761 for tid in sorted(tids):
2762 if tid in sqdata.stamppresent:
2763 sqdata.stamppresent.remove(tid)
2764 if tid in sqdata.valid:
2765 sqdata.valid.remove(tid)
2766
2767 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2768
2769 taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
2770
2771 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2772 sqdata.noexec.add(tid)
2773 sqrq.sq_task_skip(tid)
2774 bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
2775 continue
2776
2777 if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
2778 logger.debug(2, 'Setscene stamp current for task %s', tid)
2779 sqdata.stamppresent.add(tid)
2780 sqrq.sq_task_skip(tid)
2781 continue
2782
2783 if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
2784 logger.debug(2, 'Normal stamp current for task %s', tid)
2785 sqdata.stamppresent.add(tid)
2786 sqrq.sq_task_skip(tid)
2787 continue
2788
2789 tocheck.add(tid)
2790
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002791 sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)
Brad Bishop00e122a2019-10-05 11:10:57 -04002792
2793 sqdata.hashes = {}
2794 for mc in sorted(sqdata.multiconfigs):
Brad Bishop08902b02019-08-20 09:16:51 -04002795 for tid in sorted(sqdata.sq_revdeps):
Brad Bishop96ff1982019-08-19 13:50:42 -04002796 if mc_from_tid(tid) != mc:
2797 continue
Brad Bishop00e122a2019-10-05 11:10:57 -04002798 if tid in sqdata.stamppresent:
2799 continue
2800 if tid in sqdata.valid:
2801 continue
2802 if tid in sqdata.noexec:
2803 continue
2804 if tid in sqrq.scenequeue_notcovered:
2805 continue
2806 sqdata.outrightfail.add(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002807
Brad Bishop00e122a2019-10-05 11:10:57 -04002808 h = pending_hash_index(tid, rqdata)
2809 if h not in sqdata.hashes:
2810 sqdata.hashes[h] = tid
2811 else:
2812 sqrq.sq_deferred[tid] = sqdata.hashes[h]
Andrew Geissler82c905d2020-04-13 13:39:40 -05002813 bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
Brad Bishop96ff1982019-08-19 13:50:42 -04002814
2815
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002816class TaskFailure(Exception):
2817 """
2818 Exception raised when a task in a runqueue fails
2819 """
2820 def __init__(self, x):
2821 self.args = x
2822
2823
2824class runQueueExitWait(bb.event.Event):
2825 """
2826 Event when waiting for task processes to exit
2827 """
2828
2829 def __init__(self, remain):
2830 self.remain = remain
2831 self.message = "Waiting for %s active tasks to finish" % remain
2832 bb.event.Event.__init__(self)
2833
2834class runQueueEvent(bb.event.Event):
2835 """
2836 Base runQueue event class
2837 """
2838 def __init__(self, task, stats, rq):
2839 self.taskid = task
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002840 self.taskstring = task
2841 self.taskname = taskname_from_tid(task)
2842 self.taskfile = fn_from_tid(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002843 self.taskhash = rq.rqdata.get_task_hash(task)
2844 self.stats = stats.copy()
2845 bb.event.Event.__init__(self)
2846
2847class sceneQueueEvent(runQueueEvent):
2848 """
2849 Base sceneQueue event class
2850 """
2851 def __init__(self, task, stats, rq, noexec=False):
2852 runQueueEvent.__init__(self, task, stats, rq)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002853 self.taskstring = task + "_setscene"
2854 self.taskname = taskname_from_tid(task) + "_setscene"
2855 self.taskfile = fn_from_tid(task)
2856 self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002857
2858class runQueueTaskStarted(runQueueEvent):
2859 """
2860 Event notifying a task was started
2861 """
2862 def __init__(self, task, stats, rq, noexec=False):
2863 runQueueEvent.__init__(self, task, stats, rq)
2864 self.noexec = noexec
2865
2866class sceneQueueTaskStarted(sceneQueueEvent):
2867 """
2868 Event notifying a setscene task was started
2869 """
2870 def __init__(self, task, stats, rq, noexec=False):
2871 sceneQueueEvent.__init__(self, task, stats, rq)
2872 self.noexec = noexec
2873
2874class runQueueTaskFailed(runQueueEvent):
2875 """
2876 Event notifying a task failed
2877 """
2878 def __init__(self, task, stats, exitcode, rq):
2879 runQueueEvent.__init__(self, task, stats, rq)
2880 self.exitcode = exitcode
2881
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002882 def __str__(self):
2883 return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2884
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002885class sceneQueueTaskFailed(sceneQueueEvent):
2886 """
2887 Event notifying a setscene task failed
2888 """
2889 def __init__(self, task, stats, exitcode, rq):
2890 sceneQueueEvent.__init__(self, task, stats, rq)
2891 self.exitcode = exitcode
2892
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002893 def __str__(self):
2894 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2895
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002896class sceneQueueComplete(sceneQueueEvent):
2897 """
2898 Event when all the sceneQueue tasks are complete
2899 """
2900 def __init__(self, stats, rq):
2901 self.stats = stats.copy()
2902 bb.event.Event.__init__(self)
2903
2904class runQueueTaskCompleted(runQueueEvent):
2905 """
2906 Event notifying a task completed
2907 """
2908
2909class sceneQueueTaskCompleted(sceneQueueEvent):
2910 """
2911 Event notifying a setscene task completed
2912 """
2913
2914class runQueueTaskSkipped(runQueueEvent):
2915 """
2916 Event notifying a task was skipped
2917 """
2918 def __init__(self, task, stats, rq, reason):
2919 runQueueEvent.__init__(self, task, stats, rq)
2920 self.reason = reason
2921
Brad Bishop08902b02019-08-20 09:16:51 -04002922class taskUniHashUpdate(bb.event.Event):
2923 """
2924 Event notifying that the unihash for a task has been updated
2925 """
2926 def __init__(self, task, unihash):
2927 self.taskid = task
2928 self.unihash = unihash
2929 bb.event.Event.__init__(self)
2930
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002931class runQueuePipe():
2932 """
2933 Abstraction for a pipe between a worker thread and the server
2934 """
2935 def __init__(self, pipein, pipeout, d, rq, rqexec):
2936 self.input = pipein
2937 if pipeout:
2938 pipeout.close()
2939 bb.utils.nonblockingfd(self.input)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002940 self.queue = b""
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002941 self.d = d
2942 self.rq = rq
2943 self.rqexec = rqexec
2944
2945 def setrunqueueexec(self, rqexec):
2946 self.rqexec = rqexec
2947
2948 def read(self):
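        """
        Drain any pending data from the worker pipe, firing complete pickled
        <event> payloads, queueing unihash updates and passing <exitcode>
        results to runqueue_process_waitpid. Returns True if any data was read.
        """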
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002949 for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
2950 for worker in workers.values():
2951 worker.process.poll()
2952 if worker.process.returncode is not None and not self.rq.teardown:
2953 bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
2954 self.rq.finish_runqueue(True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002955
2956 start = len(self.queue)
2957 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002958 self.queue = self.queue + (self.input.read(102400) or b"")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002959 except (OSError, IOError) as e:
2960 if e.errno != errno.EAGAIN:
2961 raise
2962 end = len(self.queue)
2963 found = True
2964 while found and len(self.queue):
2965 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002966 index = self.queue.find(b"</event>")
2967 while index != -1 and self.queue.startswith(b"<event>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002968 try:
2969 event = pickle.loads(self.queue[7:index])
Andrew Geissler475cb722020-07-10 16:00:51 -05002970 except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
2971 if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e):
2972 # The pickled data could contain "</event>" so search for the next occurrence,
2973 # unpickling again; this should be the only way an unpickle error could occur
2974 index = self.queue.find(b"</event>", index + 1)
2975 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002976 bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
2977 bb.event.fire_from_worker(event, self.d)
Brad Bishop08902b02019-08-20 09:16:51 -04002978 if isinstance(event, taskUniHashUpdate):
2979 self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002980 found = True
2981 self.queue = self.queue[index+8:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002982 index = self.queue.find(b"</event>")
2983 index = self.queue.find(b"</exitcode>")
2984 while index != -1 and self.queue.startswith(b"<exitcode>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002985 try:
2986 task, status = pickle.loads(self.queue[10:index])
Andrew Geissler475cb722020-07-10 16:00:51 -05002987 except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002988 bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
2989 self.rqexec.runqueue_process_waitpid(task, status)
2990 found = True
2991 self.queue = self.queue[index+11:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002992 index = self.queue.find(b"</exitcode>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002993 return (end > start)
2994
2995 def close(self):
2996 while self.read():
2997 continue
2998 if len(self.queue) > 0:
2999 print("Warning, worker left partial message: %s" % self.queue)
3000 self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003001
3002def get_setscene_enforce_whitelist(d):
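    """
    Return the expanded BB_SETSCENE_ENFORCE_WHITELIST as a list of pn:task
    entries ('%:' entries are expanded against the command line targets), or
    None if BB_SETSCENE_ENFORCE is not enabled.
    """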
Brad Bishop6e60e8b2018-02-01 10:27:11 -05003003 if d.getVar('BB_SETSCENE_ENFORCE') != '1':
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003004 return None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05003005 whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003006 outlist = []
3007 for item in whitelist[:]:
3008 if item.startswith('%:'):
3009 for target in sys.argv[1:]:
3010 if not target.startswith('-'):
3011 outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
3012 else:
3013 outlist.append(item)
3014 return outlist
3015
3016def check_setscene_enforce_whitelist(pn, taskname, whitelist):
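    """
    Return True if pn:taskname matches an entry in the whitelist (fnmatch
    patterns are supported), or if no whitelist is in force; False otherwise.
    """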
3017 import fnmatch
Brad Bishopd7bf8c12018-02-25 22:55:05 -05003018 if whitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003019 item = '%s:%s' % (pn, taskname)
3020 for whitelist_item in whitelist:
3021 if fnmatch.fnmatch(item, whitelist_item):
3022 return True
3023 return False
3024 return True