blob: cd56a55472c1a8f30872f585835ea9f45360c927 [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001"""
2BitBake 'RunQueue' implementation
3
4Handles preparation and execution of a queue of tasks
5"""
6
7# Copyright (C) 2006-2007 Richard Purdie
8#
Brad Bishopc342db32019-05-15 21:57:59 -04009# SPDX-License-Identifier: GPL-2.0-only
Patrick Williamsc124f4f2015-09-15 14:41:29 -050010#
Patrick Williamsc124f4f2015-09-15 14:41:29 -050011
12import copy
13import os
14import sys
Patrick Williamsc124f4f2015-09-15 14:41:29 -050015import stat
Patrick Williamsc124f4f2015-09-15 14:41:29 -050016import errno
17import logging
18import re
19import bb
Andrew Geissler82c905d2020-04-13 13:39:40 -050020from bb import msg, event
Patrick Williamsc124f4f2015-09-15 14:41:29 -050021from bb import monitordisk
22import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060023import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050024from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040025import shlex
Brad Bishop96ff1982019-08-19 13:50:42 -040026import pprint
Patrick Williamsc124f4f2015-09-15 14:41:29 -050027
# Module-level loggers: "BitBake" is the root logger, "BitBake.RunQueue" is
# the runqueue-specific child, and "BitBake.RunQueue.HashEquiv" carries
# hash-equivalence related messages.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
hashequiv_logger = logging.getLogger("BitBake.RunQueue.HashEquiv")

# Matches a standalone 64-character hex string (sha256-sized), case-insensitive,
# not adjacent to any other alphanumeric character.
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050033
def fn_from_tid(tid):
    """Return the filename part of a task id (everything before the final colon)."""
    parts = tid.rsplit(":", 1)
    return parts[0]
36
def taskname_from_tid(tid):
    """Return the task name part of a task id (the text after the final colon)."""
    parts = tid.rsplit(":", 1)
    return parts[1]
39
def mc_from_tid(tid):
    """Return the multiconfig name embedded in *tid*, or "" for the default config."""
    if not (tid.startswith('mc:') and tid.count(':') >= 2):
        return ""
    return tid.split(':')[1]
44
def split_tid(tid):
    """Split a task id into (mc, fn, taskname), discarding the mcfn element."""
    mc, fn, taskname, _mcfn = split_tid_mcfn(tid)
    return (mc, fn, taskname)
48
def split_mc(n):
    """
    Split a possibly multiconfig-prefixed name ("mc:<mc>:<name>") into
    (mc, name); a name with no such prefix yields ('', name).
    """
    if n.startswith("mc:") and n.count(':') >= 2:
        _tag, mc, rest = n.split(":", 2)
        return (mc, rest)
    return ('', n)
54
def split_tid_mcfn(tid):
    """
    Decompose a task id into (mc, fn, taskname, mcfn), where mcfn is fn with
    any "mc:<mc>:" prefix re-applied (mcfn == fn when there is no multiconfig).
    """
    if tid.startswith('mc:') and tid.count(':') >= 2:
        fields = tid.split(':')
        mc = fields[1]
        taskname = fields[-1]
        fn = ":".join(fields[2:-1])
        return (mc, fn, taskname, "mc:" + mc + ":" + fn)

    fields = tid.rsplit(":", 1)
    return ("", fields[0], fields[1], fields[0])
70
def build_tid(mc, fn, taskname):
    """Assemble a task id from its components; mc may be "" for the default config."""
    if mc:
        return ":".join(("mc", mc, fn, taskname))
    return fn + ":" + taskname
75
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    """
    Build the index key used to pair up potentially matching multiconfig
    tasks: two tasks match when their PN, task name and unihash are equal.
    """
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].unihash
    # Fix: the original concatenated the string literal "taskname" rather than
    # the task's actual name, so tasks with different names but equal PN and
    # hash produced identical keys, contrary to the matching rule above.
    return pn + ":" + taskname + h
83
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for each terminal state plus the currently-active count.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow snapshot of the current counters."""
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        """Move one task from active to failed."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        """Move one task from active to completed."""
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        """Count a skipped task; it is also added to the active count."""
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        """Count one more active task."""
        self.active += 1
114
# These values indicate the next step due to be run in the
# runQueue state machine
# NOTE(review): the numbering is non-contiguous (0-1 and 4-5 are unused in
# this chunk) — presumably values left over from removed states; confirm
# before renumbering.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
123
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.

    This base ("basic") scheduler hands out the first buildable task in
    task-number order; subclasses reorder self.prio_map to implement other
    policies.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: materialise the keys view into a real list of tids. The
        # original was [self.rqdata.runtaskentries.keys()] — a one-element
        # list containing a dict keys view — which broke prio_map.index()
        # in next_buildable_task() and enumerate(self.prio_map) in dump_prio().
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                # Fix: self.buildable is a set, which has no append(); the
                # original called append() and would raise AttributeError for
                # any task already buildable at scheduler construction time.
                self.buildable.add(tid)

        # Lazily-built reverse map: tid -> its index in prio_map.
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Once tasks are running we don't need to worry about them again
        self.buildable.difference_update(self.rq.runq_running)
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            # Fast path: only one candidate, no priority comparison needed.
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Choose the buildable task with the lowest priority number whose
        # stamp file is not already being written by a running task.
        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Mark *task* as buildable."""
        self.buildable.add(task)

    def removebuildable(self, task):
        """Remove *task* from the buildable set."""
        self.buildable.remove(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of *taskid* for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Debug-log the current priority map, most important task first."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
231
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        Build the priority map ordered by descending task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket tids by weight; tids of equal weight keep their relative order.
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            w = self.rqdata.runtaskentries[tid].weight
            buckets.setdefault(w, []).append(tid)

        # Concatenate buckets in ascending weight order, then reverse so the
        # heaviest tasks end up first in the priority map.
        self.prio_map = []
        for w in sorted(buckets):
            self.prio_map.extend(buckets[w])
        self.prio_map.reverse()
258
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        """
        Start from the speed scheduler's weight-sorted priority map, then
        regroup it so that tasks of the same kind (same task name) are
        clustered, preserving per-recipe ordering.
        """
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            # Stable-partition prio_map: pull every tid whose task name
            # matches 'task' forward to task_index, preserving relative order.
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500355
class RunTaskEntry(object):
    """
    Per-task record in the runqueue: dependency links plus hash and
    scheduling data.
    """
    def __init__(self):
        self.depends = set()    # tids this task depends on
        self.revdeps = set()    # tids that depend on this task
        self.hash = None        # task hash, filled in later
        self.unihash = None     # unified (equivalence) hash, filled in later
        self.task = None
        self.weight = 1         # scheduling weight (see calculate_task_weights)
364
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500365class RunQueueData:
366 """
367 BitBake Run Queue implementation
368 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Capture the cooker/taskdata state needed to build the run queue.

        rq:         owning RunQueue
        cooker:     the BitBake cooker instance
        cfgData:    configuration datastore (read for whitelist/enforce settings)
        dataCaches: per-multiconfig recipe data caches
        taskData:   per-multiconfig TaskData
        targets:    requested build targets
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Configuration-driven behaviour toggles.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData, targets)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # NOTE(review): a dummy progress reporter is installed by default —
        # presumably replaced elsewhere when real progress reporting is wanted;
        # confirm against callers.
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
385
    def reset(self):
        """Clear the computed task graph (tid -> RunTaskEntry)."""
        self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500388
389 def runq_depends_names(self, ids):
390 import re
391 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600392 for id in ids:
393 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500394 nam = re.sub("_[^,]*,", ",", nam)
395 ret.extend([nam])
396 return ret
397
    def get_task_hash(self, tid):
        """Return the stored task hash for *tid*."""
        return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500400
    def get_task_unihash(self, tid):
        """Return the stored unified (equivalence) hash for *tid*."""
        return self.runtaskentries[tid].unihash
403
    def get_user_idstring(self, tid, task_name_suffix = ""):
        """Return a user-facing identifier for *tid*, with an optional suffix appended."""
        return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500406
    def get_short_user_idstring(self, task, task_name_suffix = ""):
        """Return a short "PN:taskname" identifier for *task* (a tid)."""
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        # Look up the recipe name (PN) in the cache for the task's multiconfig.
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)
412
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing up to 10 dependency loops.
        """
        from copy import deepcopy

        valid_chains = []     # loops already recorded (deduplicated)
        explored_deps = {}    # tid -> accumulated reverse-dependency closure
        msgs = []

        # Raised internally to stop the search once 10 loops have been found.
        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            """
            Depth-first walk over reverse dependencies, carrying the current
            chain; any revdep already on the chain closes a loop, which is
            canonicalised (chain_reorder) and recorded if new.
            """
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                # Only recurse when revdep's closure is unknown, self-referential,
                # or intersects the chain we are currently exploring.
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
505
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints: tids with no remaining reverse dependencies; weights are
        propagated from these backwards along each task's depends links.
        Returns the tid -> weight mapping (also stored on each RunTaskEntry).
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}    # per-tid count of revdeps not yet processed
        task_done = {}

        # Every task starts at weight 1; endpoints are seeded with 10.
        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Breadth-first sweep from the endpoints towards the roots: a task's
        # weight accumulates the weights of everything that depends on it, and
        # it joins the next frontier once all its revdeps have been processed.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        # Any task never reached (or with revdeps left over) is part of a cycle.
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug2("Task %s is not buildable", tid)
                logger.debug2("(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
565
566 def prepare(self):
567 """
568 Turn a set of taskData into a RunQueue and compute data needed
569 to optimise the execution order.
570 """
571
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600572 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500573 recursivetasks = {}
574 recursiveitasks = {}
575 recursivetasksselfref = set()
576
577 taskData = self.taskData
578
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600579 found = False
580 for mc in self.taskData:
581 if len(taskData[mc].taskentries) > 0:
582 found = True
583 break
584 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500585 # Nothing to do
586 return 0
587
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600588 self.init_progress_reporter.start()
589 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500590
591 # Step A - Work out a list of tasks to run
592 #
593 # Taskdata gives us a list of possible providers for every build and run
594 # target ordered by priority. It also gives information on each of those
595 # providers.
596 #
597 # To create the actual list of tasks to execute we fix the list of
598 # providers and then resolve the dependencies into task IDs. This
599 # process is repeated for each type of dependency (tdepends, deptask,
600 # rdeptast, recrdeptask, idepends).
601
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600602 def add_build_dependencies(depids, tasknames, depends, mc):
603 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500604 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600605 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500606 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600607 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500608 if depdata is None:
609 continue
610 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600611 t = depdata + ":" + taskname
612 if t in taskData[mc].taskentries:
613 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600615 def add_runtime_dependencies(depids, tasknames, depends, mc):
616 for depname in depids:
617 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500618 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600619 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500620 if depdata is None:
621 continue
622 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600623 t = depdata + ":" + taskname
624 if t in taskData[mc].taskentries:
625 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500626
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800627 def add_mc_dependencies(mc, tid):
628 mcdeps = taskData[mc].get_mcdepends()
629 for dep in mcdeps:
630 mcdependency = dep.split(':')
631 pn = mcdependency[3]
632 frommc = mcdependency[1]
633 mcdep = mcdependency[2]
634 deptask = mcdependency[4]
635 if mc == frommc:
636 fn = taskData[mcdep].build_targets[pn][0]
637 newdep = '%s:%s' % (fn,deptask)
638 taskData[mc].taskentries[tid].tdepends.append(newdep)
639
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600640 for mc in taskData:
641 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500642
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600643 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
644 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500645
Andrew Geisslerd1e89492021-02-12 15:35:20 -0600646 #logger.debug2("Processing %s,%s:%s", mc, fn, taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600647
648 depends = set()
649 task_deps = self.dataCaches[mc].task_deps[taskfn]
650
651 self.runtaskentries[tid] = RunTaskEntry()
652
653 if fn in taskData[mc].failed_fns:
654 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500655
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800656 # We add multiconfig dependencies before processing internal task deps (tdepends)
657 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
658 add_mc_dependencies(mc, tid)
659
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500660 # Resolve task internal dependencies
661 #
662 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600663 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800664 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
665 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500666
667 # Resolve 'deptask' dependencies
668 #
669 # e.g. do_sometask[deptask] = "do_someothertask"
670 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600671 if 'deptask' in task_deps and taskname in task_deps['deptask']:
672 tasknames = task_deps['deptask'][taskname].split()
673 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500674
675 # Resolve 'rdeptask' dependencies
676 #
677 # e.g. do_sometask[rdeptask] = "do_someothertask"
678 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600679 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
680 tasknames = task_deps['rdeptask'][taskname].split()
681 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500682
683 # Resolve inter-task dependencies
684 #
685 # e.g. do_sometask[depends] = "targetname:do_someothertask"
686 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600687 idepends = taskData[mc].taskentries[tid].idepends
688 for (depname, idependtask) in idepends:
689 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500690 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600691 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500692 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600693 t = depdata + ":" + idependtask
694 depends.add(t)
695 if t not in taskData[mc].taskentries:
696 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
697 irdepends = taskData[mc].taskentries[tid].irdepends
698 for (depname, idependtask) in irdepends:
699 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500700 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500701 if not taskData[mc].run_targets[depname]:
702 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600703 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500704 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600705 t = depdata + ":" + idependtask
706 depends.add(t)
707 if t not in taskData[mc].taskentries:
708 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500709
710 # Resolve recursive 'recrdeptask' dependencies (Part A)
711 #
712 # e.g. do_sometask[recrdeptask] = "do_someothertask"
713 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
714 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600715 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
716 tasknames = task_deps['recrdeptask'][taskname].split()
717 recursivetasks[tid] = tasknames
718 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
719 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
720 if taskname in tasknames:
721 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500722
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600723 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
724 recursiveitasks[tid] = []
725 for t in task_deps['recideptask'][taskname].split():
726 newdep = build_tid(mc, fn, t)
727 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500728
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600729 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400730 # Remove all self references
731 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500732
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600733 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500734
Brad Bishop316dfdd2018-06-25 12:45:53 -0400735 self.init_progress_reporter.next_stage()
736
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500737 # Resolve recursive 'recrdeptask' dependencies (Part B)
738 #
739 # e.g. do_sometask[recrdeptask] = "do_someothertask"
740 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600741 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600742
Brad Bishop316dfdd2018-06-25 12:45:53 -0400743 # Generating/interating recursive lists of dependencies is painful and potentially slow
744 # Precompute recursive task dependencies here by:
745 # a) create a temp list of reverse dependencies (revdeps)
746 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
747 # c) combine the total list of dependencies in cumulativedeps
748 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500749
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500750
Brad Bishop316dfdd2018-06-25 12:45:53 -0400751 revdeps = {}
752 deps = {}
753 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600754 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400755 deps[tid] = set(self.runtaskentries[tid].depends)
756 revdeps[tid] = set()
757 cumulativedeps[tid] = set()
758 # Generate a temp list of reverse dependencies
759 for tid in self.runtaskentries:
760 for dep in self.runtaskentries[tid].depends:
761 revdeps[dep].add(tid)
762 # Find the dependency chain endpoints
763 endpoints = set()
764 for tid in self.runtaskentries:
765 if len(deps[tid]) == 0:
766 endpoints.add(tid)
767 # Iterate the chains collating dependencies
768 while endpoints:
769 next = set()
770 for tid in endpoints:
771 for dep in revdeps[tid]:
772 cumulativedeps[dep].add(fn_from_tid(tid))
773 cumulativedeps[dep].update(cumulativedeps[tid])
774 if tid in deps[dep]:
775 deps[dep].remove(tid)
776 if len(deps[dep]) == 0:
777 next.add(dep)
778 endpoints = next
779 #for tid in deps:
780 # if len(deps[tid]) != 0:
781 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
782
783 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
784 # resolve these recursively until we aren't adding any further extra dependencies
785 extradeps = True
786 while extradeps:
787 extradeps = 0
788 for tid in recursivetasks:
789 tasknames = recursivetasks[tid]
790
791 totaldeps = set(self.runtaskentries[tid].depends)
792 if tid in recursiveitasks:
793 totaldeps.update(recursiveitasks[tid])
794 for dep in recursiveitasks[tid]:
795 if dep not in self.runtaskentries:
796 continue
797 totaldeps.update(self.runtaskentries[dep].depends)
798
799 deps = set()
800 for dep in totaldeps:
801 if dep in cumulativedeps:
802 deps.update(cumulativedeps[dep])
803
804 for t in deps:
805 for taskname in tasknames:
806 newtid = t + ":" + taskname
807 if newtid == tid:
808 continue
809 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
810 extradeps += 1
811 self.runtaskentries[tid].depends.add(newtid)
812
813 # Handle recursive tasks which depend upon other recursive tasks
814 deps = set()
815 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
816 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
817 for newtid in deps:
818 for taskname in tasknames:
819 if not newtid.endswith(":" + taskname):
820 continue
821 if newtid in self.runtaskentries:
822 extradeps += 1
823 self.runtaskentries[tid].depends.add(newtid)
824
825 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
826
827 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
828 for tid in recursivetasksselfref:
829 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600830
831 self.init_progress_reporter.next_stage()
832
833 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500834
835 # Step B - Mark all active tasks
836 #
837 # Start with the tasks we were asked to run and mark all dependencies
838 # as active too. If the task is to be 'forced', clear its stamp. Once
839 # all active tasks are marked, prune the ones we don't need.
840
841 logger.verbose("Marking Active Tasks")
842
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600843 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500844 """
845 Mark an item as active along with its depends
846 (calls itself recursively)
847 """
848
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600849 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500850 return
851
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600852 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500853
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600854 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500855 for depend in depends:
856 mark_active(depend, depth+1)
857
        def invalidate_task(tid, error_nostamp):
            """
            Invalidate the stamp/signature for a single task so it will
            re-run, via the signature generator.

            error_nostamp selects how to treat 'nostamp' tasks (which have
            no stamp to invalidate): True aborts the build, False just
            logs at debug level and carries on.
            """
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            # Warn (but do not return) if the task is unknown; the
            # invalidation below is then a no-op for practical purposes.
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                # Delegate the actual stamp removal to the signature generator
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
871
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600872 self.target_tids = []
873 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500874
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600875 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500876 continue
877
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600878 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500879 continue
880
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500881 parents = False
882 if task.endswith('-'):
883 parents = True
884 task = task[:-1]
885
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600886 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500887 continue
888
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600889 # fn already has mc prefix
890 tid = fn + ":" + task
891 self.target_tids.append(tid)
892 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500893 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600894 tasks = []
895 for x in taskData[mc].taskentries:
896 if x.startswith(fn + ":"):
897 tasks.append(taskname_from_tid(x))
898 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500899 if close_matches:
900 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
901 else:
902 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600903 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
904
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500905 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500906 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600907 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500908 mark_active(i, 1)
909 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600910 mark_active(tid, 1)
911
912 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500913
914 # Step C - Prune all inactive tasks
915 #
916 # Once all active tasks are marked, prune the ones we don't need.
917
Brad Bishop316dfdd2018-06-25 12:45:53 -0400918 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600919 for tid in list(self.runtaskentries.keys()):
920 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400921 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600922 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600923
Brad Bishop316dfdd2018-06-25 12:45:53 -0400924 # Handle --runall
925 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500926 # re-run the mark_active and then drop unused tasks from new list
927 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400928
929 for task in self.cooker.configuration.runall:
Andrew Geissler82c905d2020-04-13 13:39:40 -0500930 if not task.startswith("do_"):
931 task = "do_{0}".format(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -0400932 runall_tids = set()
933 for tid in list(self.runtaskentries):
Andrew Geissler82c905d2020-04-13 13:39:40 -0500934 wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
Brad Bishop316dfdd2018-06-25 12:45:53 -0400935 if wanttid in delcount:
936 self.runtaskentries[wanttid] = delcount[wanttid]
937 if wanttid in self.runtaskentries:
938 runall_tids.add(wanttid)
939
940 for tid in list(runall_tids):
941 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400942 if self.cooker.configuration.force:
943 invalidate_task(tid, False)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500944
945 for tid in list(self.runtaskentries.keys()):
946 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400947 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500948 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500949
950 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400951 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
952
953 self.init_progress_reporter.next_stage()
954
955 # Handle runonly
956 if self.cooker.configuration.runonly:
957 # re-run the mark_active and then drop unused tasks from new list
958 runq_build = {}
959
960 for task in self.cooker.configuration.runonly:
Andrew Geissler82c905d2020-04-13 13:39:40 -0500961 if not task.startswith("do_"):
962 task = "do_{0}".format(task)
963 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task }
Brad Bishop316dfdd2018-06-25 12:45:53 -0400964
965 for tid in list(runonly_tids):
966 mark_active(tid,1)
Brad Bishop79641f22019-09-10 07:20:22 -0400967 if self.cooker.configuration.force:
968 invalidate_task(tid, False)
Brad Bishop316dfdd2018-06-25 12:45:53 -0400969
970 for tid in list(self.runtaskentries.keys()):
971 if tid not in runq_build:
972 delcount[tid] = self.runtaskentries[tid]
973 del self.runtaskentries[tid]
974
975 if len(self.runtaskentries) == 0:
976 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500977
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500978 #
979 # Step D - Sanity checks and computation
980 #
981
982 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600983 if len(self.runtaskentries) == 0:
984 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
986 else:
987 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
988
Brad Bishop316dfdd2018-06-25 12:45:53 -0400989 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500990
991 logger.verbose("Assign Weightings")
992
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600993 self.init_progress_reporter.next_stage()
994
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500995 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600996 for tid in self.runtaskentries:
997 for dep in self.runtaskentries[tid].depends:
998 self.runtaskentries[dep].revdeps.add(tid)
999
1000 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001001
1002 # Identify tasks at the end of dependency chains
1003 # Error on circular dependency loops (length two)
1004 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001005 for tid in self.runtaskentries:
1006 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001007 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001008 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001009 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001010 if dep in self.runtaskentries[tid].depends:
1011 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
1012
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001013
1014 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
1015
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001016 self.init_progress_reporter.next_stage()
1017
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001018 # Calculate task weights
1019 # Check of higher length circular dependencies
1020 self.runq_weight = self.calculate_task_weights(endpoints)
1021
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001022 self.init_progress_reporter.next_stage()
1023
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001024 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001025 for mc in self.dataCaches:
1026 prov_list = {}
1027 seen_fn = []
1028 for tid in self.runtaskentries:
1029 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1030 if taskfn in seen_fn:
1031 continue
1032 if mc != tidmc:
1033 continue
1034 seen_fn.append(taskfn)
1035 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1036 if prov not in prov_list:
1037 prov_list[prov] = [taskfn]
1038 elif taskfn not in prov_list[prov]:
1039 prov_list[prov].append(taskfn)
1040 for prov in prov_list:
1041 if len(prov_list[prov]) < 2:
1042 continue
1043 if prov in self.multi_provider_whitelist:
1044 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001045 seen_pn = []
1046 # If two versions of the same PN are being built its fatal, we don't support it.
1047 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001048 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001049 if pn not in seen_pn:
1050 seen_pn.append(pn)
1051 else:
1052 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001053 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1054 #
1055 # Construct a list of things which uniquely depend on each provider
1056 # since this may help the user figure out which dependency is triggering this warning
1057 #
1058 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1059 deplist = {}
1060 commondeps = None
1061 for provfn in prov_list[prov]:
1062 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001063 for tid in self.runtaskentries:
1064 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001065 if fn != provfn:
1066 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001067 for dep in self.runtaskentries[tid].revdeps:
1068 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001069 if fn == provfn:
1070 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001071 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001072 if not commondeps:
1073 commondeps = set(deps)
1074 else:
1075 commondeps &= deps
1076 deplist[provfn] = deps
1077 for provfn in deplist:
1078 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1079 #
1080 # Construct a list of provides and runtime providers for each recipe
1081 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1082 #
1083 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1084 provide_results = {}
1085 rprovide_results = {}
1086 commonprovs = None
1087 commonrprovs = None
1088 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001089 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001090 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001091 for rprovide in self.dataCaches[mc].rproviders:
1092 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001093 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001094 for package in self.dataCaches[mc].packages:
1095 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001096 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001097 for package in self.dataCaches[mc].packages_dynamic:
1098 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001099 rprovides.add(package)
1100 if not commonprovs:
1101 commonprovs = set(provides)
1102 else:
1103 commonprovs &= provides
1104 provide_results[provfn] = provides
1105 if not commonrprovs:
1106 commonrprovs = set(rprovides)
1107 else:
1108 commonrprovs &= rprovides
1109 rprovide_results[provfn] = rprovides
1110 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1111 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1112 for provfn in prov_list[prov]:
1113 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1114 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1115
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001116 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001117 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001118 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001119 logger.error(msg)
1120
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001121 self.init_progress_reporter.next_stage()
1122
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001123 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001124 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001125 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001126 self.stampfnwhitelist[mc] = []
1127 for entry in self.stampwhitelist.split():
1128 if entry not in self.taskData[mc].build_targets:
1129 continue
1130 fn = self.taskData.build_targets[entry][0]
1131 self.stampfnwhitelist[mc].append(fn)
1132
1133 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001134
1135 # Iterate over the task list looking for tasks with a 'setscene' function
Andrew Geissler82c905d2020-04-13 13:39:40 -05001136 self.runq_setscene_tids = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001138 for tid in self.runtaskentries:
1139 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001140 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001141 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142 continue
Andrew Geissler82c905d2020-04-13 13:39:40 -05001143 self.runq_setscene_tids.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001144
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001146
1147 # Invalidate task if force mode active
1148 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001149 for tid in self.target_tids:
1150 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001151
1152 # Invalidate task if invalidate mode active
1153 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001154 for tid in self.target_tids:
1155 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001156 for st in self.cooker.configuration.invalidate_stamp.split(','):
1157 if not st.startswith("do_"):
1158 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001159 invalidate_task(fn + ":" + st, True)
1160
1161 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001162
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001163 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001164 for mc in taskData:
1165 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1166 virtpnmap = {}
1167 for v in virtmap:
1168 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1169 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1170 if hasattr(bb.parse.siggen, "tasks_resolved"):
1171 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1172
1173 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001174
Brad Bishop00e122a2019-10-05 11:10:57 -04001175 bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
1176
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001177 # Iterate over the task list and call into the siggen code
1178 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001179 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001180 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001181 for tid in todeal.copy():
1182 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1183 dealtwith.add(tid)
1184 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001185 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001186
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001187 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001188
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001189 #self.dump_data()
1190 return len(self.runtaskentries)
1191
Brad Bishop19323692019-04-05 15:28:33 -04001192 def prepare_task_hash(self, tid):
Andrew Geissler5a43b432020-06-13 10:46:56 -05001193 dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid))
1194 bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc)
1195 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc)
Brad Bishop08902b02019-08-20 09:16:51 -04001196 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001197
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001198 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001199 """
1200 Dump some debug information on the internal data structures
1201 """
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001202 logger.debug3("run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001203 for tid in self.runtaskentries:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001204 logger.debug3(" %s: %s Deps %s RevDeps %s", tid,
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001205 self.runtaskentries[tid].weight,
1206 self.runtaskentries[tid].depends,
1207 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001208
class RunQueueWorker():
    """
    Simple holder pairing a bitbake-worker subprocess with the pipe
    object used to read results/events back from it.
    """
    def __init__(self, process, pipe):
        # The subprocess.Popen object for the worker process
        self.process = process
        # The runQueuePipe wrapping the worker's stdout
        self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001213
1214class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
        """
        Set up the runqueue: build the task graph data (RunQueueData),
        read the stamp/hash policy configuration, and initialise the
        execution state and worker bookkeeping.
        """

        self.cooker = cooker
        self.cfgData = cfgData
        # Holds the computed task graph, weights, hashes etc.
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        # Stamp/validity policy knobs from configuration (with defaults)
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        # Initial state of the runqueue state machine (module-level constant)
        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneInit, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False
        # Active executor; set once task execution starts
        self.rqexe = None
        # Per-multiconfig worker processes (mc name -> RunQueueWorker)
        self.worker = {}
        self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001239
    def _start_worker(self, mc, fakeroot = False, rqexec = None):
        """
        Spawn a bitbake-worker subprocess for multiconfig 'mc' and hand it
        its initial configuration over stdin.

        If fakeroot is set, the worker is launched through FAKEROOTCMD with
        the FAKEROOTBASEENV environment applied. Returns a RunQueueWorker
        wrapping the process and the pipe used to read its output.
        """
        logger.debug("Starting bitbake-worker")
        # 'magic' is a startup token passed on the worker command line;
        # variants select profiling ("...bad") and fakeroot ("...beef") modes
        magic = "decafbad"
        if self.cooker.configuration.profile:
            magic = "decafbadbad"
        fakerootlogs = None
        if fakeroot:
            magic = magic + "beef"
            mcdata = self.cooker.databuilder.mcdata[mc]
            fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
            env = os.environ.copy()
            # FAKEROOTBASEENV holds KEY=VALUE pairs to overlay on our environment
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
            fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
        else:
            worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        # Non-blocking stdout so the event loop can poll the worker pipe
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)

        # Configuration snapshot the worker needs to execute tasks
        workerdata = {
            "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
            "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
            "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
            "sigdata" : bb.parse.siggen.get_taskdata(),
            "logdefaultlevel" : bb.msg.loggerDefaultLogLevel,
            "build_verbose_shell" : self.cooker.configuration.build_verbose_shell,
            "build_verbose_stdout" : self.cooker.configuration.build_verbose_stdout,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME"),
            "date" : self.cfgData.getVar("DATE"),
            "time" : self.cfgData.getVar("TIME"),
            "hashservaddr" : self.cooker.hashservaddr,
            "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
        }

        # Handshake: pickled payloads framed by XML-like byte markers, in
        # this exact order (cookerconfig, extraconfigdata, workerdata)
        worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
        worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
        worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
        worker.stdin.flush()

        return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001285
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001286 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001287 if not worker:
1288 return
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001289 logger.debug("Teardown for bitbake-worker")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001290 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001291 worker.process.stdin.write(b"<quit></quit>")
1292 worker.process.stdin.flush()
1293 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001294 except IOError:
1295 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001296 while worker.process.returncode is None:
1297 worker.pipe.read()
1298 worker.process.poll()
1299 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001300 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001301 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001302
1303 def start_worker(self):
1304 if self.worker:
1305 self.teardown_workers()
1306 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001307 for mc in self.rqdata.dataCaches:
1308 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001309
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001310 def start_fakeworker(self, rqexec, mc):
1311 if not mc in self.fakeworker:
1312 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001313
1314 def teardown_workers(self):
1315 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001316 for mc in self.worker:
1317 self._teardown_worker(self.worker[mc])
1318 self.worker = {}
1319 for mc in self.fakeworker:
1320 self._teardown_worker(self.fakeworker[mc])
1321 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001322
1323 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001324 for mc in self.worker:
1325 self.worker[mc].pipe.read()
1326 for mc in self.fakeworker:
1327 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001328
1329 def active_fds(self):
1330 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001331 for mc in self.worker:
1332 fds.append(self.worker[mc].pipe.input)
1333 for mc in self.fakeworker:
1334 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001335 return fds
1336
    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """
        Return True if the stamp for tid is current, i.e. the task does not
        need to re-run.

        A stamp is current when it exists, the task is not 'nostamp', and it
        is not older than the stamps of any of its dependencies. With
        recurse=True the check walks the dependency tree, memoising results
        in *cache* (a dict shared across the recursion).
        """
        def get_timestamp(f):
            # Return the mtime of f, or None if it doesn't exist or can't
            # be statted. The bare except deliberately treats any stat
            # failure as "no stamp".
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        # "perfile" only compares stamps within the same recipe; otherwise
        # cross-recipe dependencies are compared too (optionally excluding
        # files on the stamp whitelist).
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug2("Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug2("%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene tasks (other than do_setscene itself) are considered
        # current once their stamp exists.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # If the dependency's setscene stamp is newer than (or replaces)
                # its real stamp, the setscene version satisfied it - skip.
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug2('Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        # Our stamp is older than a dependency's stamp.
                        logger.debug2('Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                    if recurse and iscurrent:
                        if dep in cache:
                            iscurrent = cache[dep]
                            if not iscurrent:
                                logger.debug2('Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                        else:
                            iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                            cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent
1409
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001410 def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04001411 valid = set()
1412 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001413 sq_data = {}
1414 sq_data['hash'] = {}
1415 sq_data['hashfn'] = {}
1416 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001417 for tid in tocheck:
1418 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001419 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1420 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1421 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001422
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001423 valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary)
Brad Bishop96ff1982019-08-19 13:50:42 -04001424
1425 return valid
1426
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001427 def validate_hash(self, sq_data, d, siginfo, currentcount, summary):
1428 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary}
Brad Bishop19323692019-04-05 15:28:33 -04001429
Brad Bishop08902b02019-08-20 09:16:51 -04001430 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001431 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)"
Brad Bishop19323692019-04-05 15:28:33 -04001432
Brad Bishop19323692019-04-05 15:28:33 -04001433 return bb.utils.better_eval(call, locs)
1434
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is the runqueue state machine; it is called repeatedly from the
        server idle loop. Returns False when the build is complete, otherwise
        a retval (True or a deadline) asking to be called again.
        """

        retval = True

        if self.state is runQueuePrepare:
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            if self.rqdata.prepare() == 0:
                # Nothing to do.
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                bb.parse.siggen.save_unitaskhashes()

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()

            # we are ready to run,  emit dependency info to any UI or class which
            # needs it
            depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
            self.rqdata.init_progress_reporter.next_stage()
            bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

            # Register the disk monitor check to run on heartbeat events while
            # the queue is running or cleaning up.
            if not self.dm_event_handler_registered:
                 res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',), data=self.cfgData)
                 self.dm_event_handler_registered = True

            # Signature dumping mode (bitbake --dump-signatures) short-circuits
            # the build: dump (and optionally diff) then mark complete.
            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete

        if self.state is runQueueSceneInit:
            self.rqdata.init_progress_reporter.next_stage()
            self.start_worker()
            self.rqdata.init_progress_reporter.next_stage()
            self.rqexe = RunQueueExecute(self)

            # If we don't have any setscene functions, skip execution
            if len(self.rqdata.runq_setscene_tids) == 0:
                logger.info('No setscene tasks')
                for tid in self.rqdata.runtaskentries:
                    if len(self.rqdata.runtaskentries[tid].depends) == 0:
                        self.rqexe.setbuildable(tid)
                    self.rqexe.tasks_notcovered.add(tid)
                self.rqexe.sqdone = True
            logger.info('Executing Tasks')
            self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            bb.parse.siggen.save_unitaskhashes()
            self.teardown_workers()
            if self.rqexe:
                if self.rqexe.stats.failed:
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
                else:
                    # Let's avoid the word "failed" if nothing actually did
                    logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1533
1534 def execute_runqueue(self):
1535 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1536 try:
1537 return self._execute_runqueue()
1538 except bb.runqueue.TaskFailure:
1539 raise
1540 except SystemExit:
1541 raise
1542 except bb.BBHandledException:
1543 try:
1544 self.teardown_workers()
1545 except:
1546 pass
1547 self.state = runQueueComplete
1548 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001549 except Exception as err:
1550 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001551 try:
1552 self.teardown_workers()
1553 except:
1554 pass
1555 self.state = runQueueComplete
1556 raise
1557
1558 def finish_runqueue(self, now = False):
1559 if not self.rqexe:
1560 self.state = runQueueComplete
1561 return
1562
1563 if now:
1564 self.rqexe.finish_now()
1565 else:
1566 self.rqexe.finish()
1567
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001568 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001569 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Andrew Geissler5a43b432020-06-13 10:46:56 -05001570 mc = bb.runqueue.mc_from_tid(fn)
1571 the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001572 siggen = bb.parse.siggen
1573 dataCaches = self.rqdata.dataCaches
1574 siggen.dump_sigfn(fn, dataCaches, options)
1575
1576 def dump_signatures(self, options):
1577 fns = set()
1578 bb.note("Reparsing files to collect dependency data")
1579
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001580 for tid in self.rqdata.runtaskentries:
1581 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001582 fns.add(fn)
1583
1584 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1585 # We cannot use the real multiprocessing.Pool easily due to some local data
1586 # that can't be pickled. This is a cheap multi-process solution.
1587 launched = []
1588 while fns:
1589 if len(launched) < max_process:
1590 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1591 p.start()
1592 launched.append(p)
1593 for q in launched:
1594 # The finished processes are joined when calling is_alive()
1595 if not q.is_alive():
1596 launched.remove(q)
1597 for p in launched:
1598 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001599
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001600 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001601
1602 return
1603
    def print_diffscenetasks(self):
        """Report which tasks can't be restored from the setscene cache.

        Validates the hashes of all non-noexec tasks, then prints the set of
        invalid tasks whose dependencies are all valid (i.e. the points where
        the current build first diverges from the cache). Returns that set.
        """
        noexec = []
        tocheck = set()

        # Partition tasks: noexec tasks are never checked against the cache.
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            tocheck.add(tid)

        valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Anything neither valid nor noexec must be rebuilt.
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk each invalid task's dependencies; a task that
        # depends (transitively) on another invalid task goes into 'found'
        # so only the *first* divergent tasks are reported.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                if tid in found:
                    # Already known to have an invalid ancestor; stop early.
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1667
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid task, find the closest previously-written sigdata
        file and print a human-readable diff explaining why the cached result
        couldn't be used.
        """

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Recursively diff a pair of dependency sigdata files, indenting
            # nested output; falls back to a note when either file is missing.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current hash's own file; compare against the most
            # recent of the remaining candidates.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1702
Brad Bishop96ff1982019-08-19 13:50:42 -04001703
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001704class RunQueueExecute:
1705
    def __init__(self, rq):
        """Set up execution state for a prepared RunQueue *rq*."""
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Parallelism limit and scheduler selection from configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Setscene task state sets.
        self.sq_buildable = set()
        self.sq_running = set()
        self.sq_live = set()

        # Hash-equivalence rehash bookkeeping.
        self.updated_taskhash_queue = []
        self.pending_migrations = set()

        # Real (non-setscene) task state sets.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()
        self.runq_tasksrun = set()

        # Stamp files owned by currently-running tasks.
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []
        # Setscene tasks deferred behind another task with the same hash.
        self.sq_deferred = {}

        self.stampcache = {}

        self.holdoff_tasks = set()
        self.holdoff_need_update = True
        self.sqdone = False

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
        self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Let the worker pipes deliver events back to this executor.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
             bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

        # List of setscene tasks which we've covered
        self.scenequeue_covered = set()
        # List of tasks which are covered (including setscene ones)
        self.tasks_covered = set()
        self.tasks_scenequeue_done = set()
        self.scenequeue_notcovered = set()
        self.tasks_notcovered = set()
        self.scenequeue_notneeded = set()

        # We can't skip specified target tasks which aren't setscene tasks
        self.cantskip = set(self.rqdata.target_tids)
        self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
        self.cantskip.intersection_update(self.rqdata.runtaskentries)

        # Pick the scheduler class whose .name matches BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug("Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

        #if len(self.rqdata.runq_setscene_tids) > 0:
        self.sqdata = SQData()
        build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001776
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001777 def runqueue_process_waitpid(self, task, status, fakerootlog=None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001778
1779 # self.build_stamps[pid] may not exist when use shared work directory.
1780 if task in self.build_stamps:
1781 self.build_stamps2.remove(self.build_stamps[task])
1782 del self.build_stamps[task]
1783
Brad Bishop96ff1982019-08-19 13:50:42 -04001784 if task in self.sq_live:
1785 if status != 0:
1786 self.sq_task_fail(task, status)
1787 else:
1788 self.sq_task_complete(task)
1789 self.sq_live.remove(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001790 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001791 if status != 0:
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001792 self.task_fail(task, status, fakerootlog=fakerootlog)
Brad Bishop96ff1982019-08-19 13:50:42 -04001793 else:
1794 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001795 return True
1796
1797 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001798 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001799 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001800 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1801 self.rq.worker[mc].process.stdin.flush()
1802 except IOError:
1803 # worker must have died?
1804 pass
1805 for mc in self.rq.fakeworker:
1806 try:
1807 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1808 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001809 except IOError:
1810 # worker must have died?
1811 pass
1812
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001813 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001814 self.rq.state = runQueueFailed
1815 return
1816
1817 self.rq.state = runQueueComplete
1818 return
1819
1820 def finish(self):
1821 self.rq.state = runQueueCleanUp
1822
Brad Bishop96ff1982019-08-19 13:50:42 -04001823 active = self.stats.active + self.sq_stats.active
1824 if active > 0:
1825 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001826 self.rq.read_workers()
1827 return self.rq.active_fds()
1828
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001829 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001830 self.rq.state = runQueueFailed
1831 return True
1832
1833 self.rq.state = runQueueComplete
1834 return True
1835
Brad Bishop96ff1982019-08-19 13:50:42 -04001836 # Used by setscene only
1837 def check_dependencies(self, task, taskdeps):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001838 if not self.rq.depvalidate:
1839 return False
1840
Brad Bishop08902b02019-08-20 09:16:51 -04001841 # Must not edit parent data
1842 taskdeps = set(taskdeps)
1843
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001844 taskdata = {}
1845 taskdeps.add(task)
1846 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001847 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1848 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001849 taskdata[dep] = [pn, taskname, fn]
1850 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001851 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001852 valid = bb.utils.better_eval(call, locs)
1853 return valid
1854
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001855 def can_start_task(self):
Brad Bishop96ff1982019-08-19 13:50:42 -04001856 active = self.stats.active + self.sq_stats.active
1857 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001858 return can_start
1859
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001860 def get_schedulers(self):
1861 schedulers = set(obj for obj in globals().values()
1862 if type(obj) is type and
1863 issubclass(obj, RunQueueScheduler))
1864
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001865 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001866 if user_schedulers:
1867 for sched in user_schedulers.split():
1868 if not "." in sched:
1869 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1870 continue
1871
1872 modname, name = sched.rsplit(".", 1)
1873 try:
1874 module = __import__(modname, fromlist=(name,))
1875 except ImportError as exc:
1876 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1877 raise SystemExit(1)
1878 else:
1879 schedulers.add(getattr(module, name))
1880 return schedulers
1881
    def setbuildable(self, task):
        # Record the task as buildable, then notify the scheduler so it can
        # consider it when choosing the next task to run.
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001885
1886 def task_completeoutright(self, task):
1887 """
1888 Mark a task as completed
1889 Look at the reverse dependencies and mark any task with
1890 completed dependencies as buildable
1891 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001892 self.runq_complete.add(task)
1893 for revdep in self.rqdata.runtaskentries[task].revdeps:
1894 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001895 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001896 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001897 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001898 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001899 for dep in self.rqdata.runtaskentries[revdep].depends:
1900 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001901 alldeps = False
1902 break
1903 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001904 self.setbuildable(revdep)
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001905 logger.debug("Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001906
    def task_complete(self, task):
        # Record a successful task: update stats, notify listeners, then
        # propagate completion so reverse dependencies may become buildable.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
        self.runq_tasksrun.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001912
    def task_fail(self, task, exitcode, fakerootlog=None):
        """
        Called when a task has failed
        Updates the state engine with the failure

        If a fakeroot (pseudo) log is available, scan it for error markers;
        the log text is attached to the failure event only when one is found.
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)

        fakeroot_log = ""
        if fakerootlog and os.path.exists(fakerootlog):
            with open(fakerootlog) as fakeroot_log_file:
                fakeroot_failed = False
                # Walk the log backwards; prepending each line rebuilds the
                # original order. Stop at the start marker of the most recent
                # pseudo session so only that session's lines are kept.
                for line in reversed(fakeroot_log_file.readlines()):
                    for fakeroot_error in ['mismatch', 'error', 'fatal']:
                        if fakeroot_error in line.lower():
                            fakeroot_failed = True
                    if 'doing new pid setup and server start' in line:
                        break
                    fakeroot_log = line + fakeroot_log

            if not fakeroot_failed:
                # No error markers found - don't attach the log.
                fakeroot_log = None

        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=fakeroot_log), self.cfgData)

        # Unless -k/abort-off was requested, start winding the build down.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1940
1941 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001942 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001943 self.setbuildable(task)
1944 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1945 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001946 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001947 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001948
Brad Bishop08902b02019-08-20 09:16:51 -04001949 def summarise_scenequeue_errors(self):
1950 err = False
1951 if not self.sqdone:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001952 logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
Brad Bishop08902b02019-08-20 09:16:51 -04001953 completeevent = sceneQueueComplete(self.sq_stats, self.rq)
1954 bb.event.fire(completeevent, self.cfgData)
1955 if self.sq_deferred:
1956 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
1957 err = True
1958 if self.updated_taskhash_queue:
1959 logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
1960 err = True
1961 if self.holdoff_tasks:
1962 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
1963 err = True
1964
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001965 for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered):
1966 # No task should end up in both covered and uncovered, that is a bug.
1967 logger.error("Setscene task %s in both covered and notcovered." % tid)
1968
Brad Bishop08902b02019-08-20 09:16:51 -04001969 for tid in self.rqdata.runq_setscene_tids:
1970 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
1971 err = True
1972 logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
1973 if tid not in self.sq_buildable:
1974 err = True
1975 logger.error("Setscene Task %s was never marked as buildable" % tid)
1976 if tid not in self.sq_running:
1977 err = True
1978 logger.error("Setscene Task %s was never marked as running" % tid)
1979
1980 for x in self.rqdata.runtaskentries:
1981 if x not in self.tasks_covered and x not in self.tasks_notcovered:
1982 logger.error("Task %s was never moved from the setscene queue" % x)
1983 err = True
1984 if x not in self.tasks_scenequeue_done:
1985 logger.error("Task %s was never processed by the setscene code" % x)
1986 err = True
1987 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
1988 logger.error("Task %s was never marked as buildable by the setscene code" % x)
1989 err = True
1990 return err
1991
1992
    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue

        One step of the execution state machine: each call tries to make
        progress (start a setscene task, start a real task, or service the
        worker pipes). Returns True while more calls are needed, or the
        result of self.rq.active_fds() while tasks are still in flight.
        """

        self.rq.read_workers()
        # Apply any taskhash/unihash changes reported since the last call.
        if self.updated_taskhash_queue or self.pending_migrations:
            self.process_possible_migrations()

        if not hasattr(self, "sorted_setscene_tids"):
            # Don't want to sort this set every execution
            self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)

        task = None
        if not self.sqdone and self.can_start_task():
            # Find the next setscene to run
            for nexttask in self.sorted_setscene_tids:
                if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
                    # A skippable setscene task whose reverse dependencies are
                    # all already covered does not need to run at all.
                    if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
                        if nexttask not in self.rqdata.target_tids:
                            logger.debug2("Skipping setscene for task %s" % nexttask)
                            self.sq_task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            if nexttask in self.sq_deferred:
                                del self.sq_deferred[nexttask]
                            return True
                    # If covered tasks are running, need to wait for them to complete
                    # NOTE(review): this 'continue' only skips within the inner
                    # 'for t' loop, so this check has no effect on whether
                    # nexttask is selected - presumably it was meant to defer
                    # nexttask; confirm against upstream bitbake.
                    for t in self.sqdata.sq_covered_tasks[nexttask]:
                        if t in self.runq_running and t not in self.runq_complete:
                            continue
                    if nexttask in self.sq_deferred:
                        # Still waiting on the task we were deferred behind.
                        if self.sq_deferred[nexttask] not in self.runq_complete:
                            continue
                        logger.debug("Task %s no longer deferred" % nexttask)
                        del self.sq_deferred[nexttask]
                        valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
                        if not valid:
                            logger.debug("%s didn't become valid, skipping setscene" % nexttask)
                            self.sq_task_failoutright(nexttask)
                            return True
                        else:
                            self.sqdata.outrightfail.remove(nexttask)
                    if nexttask in self.sqdata.outrightfail:
                        # No sstate object exists for this task.
                        logger.debug2('No package found, so skipping setscene task %s', nexttask)
                        self.sq_task_failoutright(nexttask)
                        return True
                    if nexttask in self.sqdata.unskippable:
                        logger.debug2("Setscene task %s is unskippable" % nexttask)
                    task = nexttask
                    break
        if task is not None:
            # Launch the chosen setscene task via a worker process.
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug2('Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.sq_task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.sq_task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug2('Setscene stamp current task %s, so skip it and its dependencies', task)
                self.sq_task_skip(task)
                return True

            if self.cooker.configuration.skipsetscene:
                logger.debug2('No setscene tasks should be executed. Skipping %s', task)
                self.sq_task_failoutright(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.sq_build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            taskhash = self.rqdata.get_task_hash(task)
            unihash = self.rqdata.get_task_unihash(task)
            # Hand the task to the (fake)worker over its stdin pipe.
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.sq_running.add(task)
            self.sq_live.add(task)
            self.sq_stats.taskActive()
            if self.can_start_task():
                return True

        self.update_holdofftasks()

        # Detect the point where the scenequeue has fully drained.
        if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
            hashequiv_logger.verbose("Setscene tasks completed")

            err = self.summarise_scenequeue_errors()
            if err:
                self.rq.state = runQueueFailed
                return True

            if self.cooker.configuration.setsceneonly:
                self.rq.state = runQueueComplete
                return True
            self.sqdone = True

            if self.stats.total == 0:
                # nothing to do
                self.rq.state = runQueueComplete
                return True

        if self.cooker.configuration.setsceneonly:
            task = None
        else:
            task = self.sched.next()
        if task is not None:
            # Launch the next real (non-setscene) task.
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            if self.rqdata.setscenewhitelist is not None:
                if self.check_setscenewhitelist(task):
                    self.task_fail(task, "setscene whitelist")
                    return True

            if task in self.tasks_covered:
                logger.debug2("Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug2("Stamp current task %s", task)

                self.task_skip(task, "existing")
                self.runq_tasksrun.add(task)
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks are completed locally - no worker involved.
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                taskhash = self.rqdata.get_task_hash(task)
                unihash = self.rqdata.get_task_unihash(task)
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    if not mc in self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self, mc)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

                self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
                self.build_stamps2.append(self.build_stamps[task])
                self.runq_running.add(task)
                self.stats.taskActive()
                if self.can_start_task():
                    return True

        # Tasks still running: wait on their output pipes.
        if self.stats.active > 0 or self.sq_stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        # No more tasks can be run. If we have deferred setscene tasks we should run them.
        if self.sq_deferred:
            tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
            logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
            self.sq_task_failoutright(tid)
            return True

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        err = self.summarise_scenequeue_errors()
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
                err = True
            elif task not in self.runq_running:
                logger.error("Task %s never ran!", task)
                err = True
            elif task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
                err = True

        if err:
            self.rq.state = runQueueFailed
        else:
            self.rq.state = runQueueComplete

        return True
2211
Brad Bishopc68388fc2019-08-26 01:33:31 -04002212 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002213 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002214 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002215 thismc = mc_from_tid(dep)
2216 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002217 continue
2218 ret.add(dep)
2219 return ret
2220
Brad Bishopa34c0302019-09-23 22:34:48 -04002221 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
Andrew Geissler99467da2019-02-25 18:54:23 -06002222 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002223 def build_taskdepdata(self, task):
2224 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002225 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002226 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002227 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002228 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002229 while next:
2230 additional = []
2231 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002232 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2233 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2234 deps = self.rqdata.runtaskentries[revdep].depends
2235 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002236 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002237 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002238 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002239 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002240 for revdep2 in deps:
2241 if revdep2 not in taskdepdata:
2242 additional.append(revdep2)
2243 next = additional
2244
2245 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2246 return taskdepdata
2247
Brad Bishop08902b02019-08-20 09:16:51 -04002248 def update_holdofftasks(self):
Brad Bishopc68388fc2019-08-26 01:33:31 -04002249
2250 if not self.holdoff_need_update:
2251 return
2252
2253 notcovered = set(self.scenequeue_notcovered)
2254 notcovered |= self.cantskip
2255 for tid in self.scenequeue_notcovered:
2256 notcovered |= self.sqdata.sq_covered_tasks[tid]
2257 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2258 notcovered.intersection_update(self.tasks_scenequeue_done)
2259
2260 covered = set(self.scenequeue_covered)
2261 for tid in self.scenequeue_covered:
2262 covered |= self.sqdata.sq_covered_tasks[tid]
2263 covered.difference_update(notcovered)
2264 covered.intersection_update(self.tasks_scenequeue_done)
2265
2266 for tid in notcovered | covered:
2267 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2268 self.setbuildable(tid)
2269 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2270 self.setbuildable(tid)
2271
2272 self.tasks_covered = covered
2273 self.tasks_notcovered = notcovered
2274
Brad Bishop08902b02019-08-20 09:16:51 -04002275 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002276
Brad Bishop08902b02019-08-20 09:16:51 -04002277 for tid in self.rqdata.runq_setscene_tids:
2278 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2279 self.holdoff_tasks.add(tid)
2280
2281 for tid in self.holdoff_tasks.copy():
2282 for dep in self.sqdata.sq_covered_tasks[tid]:
2283 if dep not in self.runq_complete:
2284 self.holdoff_tasks.add(dep)
2285
Brad Bishopc68388fc2019-08-26 01:33:31 -04002286 self.holdoff_need_update = False
2287
Brad Bishop08902b02019-08-20 09:16:51 -04002288 def process_possible_migrations(self):
2289
2290 changed = set()
Andrew Geissler82c905d2020-04-13 13:39:40 -05002291 toprocess = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002292 for tid, unihash in self.updated_taskhash_queue.copy():
2293 if tid in self.runq_running and tid not in self.runq_complete:
2294 continue
2295
2296 self.updated_taskhash_queue.remove((tid, unihash))
2297
2298 if unihash != self.rqdata.runtaskentries[tid].unihash:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002299 hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash))
Brad Bishop08902b02019-08-20 09:16:51 -04002300 self.rqdata.runtaskentries[tid].unihash = unihash
2301 bb.parse.siggen.set_unihash(tid, unihash)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002302 toprocess.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002303
Andrew Geissler82c905d2020-04-13 13:39:40 -05002304 # Work out all tasks which depend upon these
2305 total = set()
2306 next = set()
2307 for p in toprocess:
2308 next |= self.rqdata.runtaskentries[p].revdeps
2309 while next:
2310 current = next.copy()
2311 total = total | next
2312 next = set()
2313 for ntid in current:
2314 next |= self.rqdata.runtaskentries[ntid].revdeps
2315 next.difference_update(total)
Brad Bishop08902b02019-08-20 09:16:51 -04002316
Andrew Geissler82c905d2020-04-13 13:39:40 -05002317 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2318 next = set()
2319 for p in total:
2320 if len(self.rqdata.runtaskentries[p].depends) == 0:
2321 next.add(p)
2322 elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
2323 next.add(p)
2324
2325 # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
2326 while next:
2327 current = next.copy()
2328 next = set()
2329 for tid in current:
2330 if len(self.rqdata.runtaskentries[p].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2331 continue
2332 orighash = self.rqdata.runtaskentries[tid].hash
Andrew Geissler5a43b432020-06-13 10:46:56 -05002333 dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
2334 newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002335 origuni = self.rqdata.runtaskentries[tid].unihash
2336 newuni = bb.parse.siggen.get_unihash(tid)
2337 # FIXME, need to check it can come from sstate at all for determinism?
2338 remapped = False
2339 if newuni == origuni:
2340 # Nothing to do, we match, skip code below
2341 remapped = True
2342 elif tid in self.scenequeue_covered or tid in self.sq_live:
2343 # Already ran this setscene task or it running. Report the new taskhash
2344 bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches)
2345 hashequiv_logger.verbose("Already covered setscene for %s so ignoring rehash (remap)" % (tid))
2346 remapped = True
2347
2348 if not remapped:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002349 #logger.debug("Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
Andrew Geissler82c905d2020-04-13 13:39:40 -05002350 self.rqdata.runtaskentries[tid].hash = newhash
2351 self.rqdata.runtaskentries[tid].unihash = newuni
2352 changed.add(tid)
2353
2354 next |= self.rqdata.runtaskentries[tid].revdeps
2355 total.remove(tid)
2356 next.intersection_update(total)
Brad Bishop08902b02019-08-20 09:16:51 -04002357
2358 if changed:
2359 for mc in self.rq.worker:
2360 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2361 for mc in self.rq.fakeworker:
2362 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2363
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002364 hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed)))
Brad Bishop08902b02019-08-20 09:16:51 -04002365
2366 for tid in changed:
2367 if tid not in self.rqdata.runq_setscene_tids:
2368 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002369 if tid not in self.pending_migrations:
2370 self.pending_migrations.add(tid)
2371
Andrew Geissler82c905d2020-04-13 13:39:40 -05002372 update_tasks = []
Brad Bishop08902b02019-08-20 09:16:51 -04002373 for tid in self.pending_migrations.copy():
Andrew Geissler82c905d2020-04-13 13:39:40 -05002374 if tid in self.runq_running or tid in self.sq_live:
Brad Bishop6dbb3162019-11-25 09:41:34 -05002375 # Too late, task already running, not much we can do now
2376 self.pending_migrations.remove(tid)
2377 continue
2378
Brad Bishop08902b02019-08-20 09:16:51 -04002379 valid = True
2380 # Check no tasks this covers are running
2381 for dep in self.sqdata.sq_covered_tasks[tid]:
2382 if dep in self.runq_running and dep not in self.runq_complete:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002383 hashequiv_logger.debug2("Task %s is running which blocks setscene for %s from running" % (dep, tid))
Brad Bishop08902b02019-08-20 09:16:51 -04002384 valid = False
2385 break
2386 if not valid:
2387 continue
2388
2389 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002390 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002391
2392 if tid in self.tasks_scenequeue_done:
2393 self.tasks_scenequeue_done.remove(tid)
2394 for dep in self.sqdata.sq_covered_tasks[tid]:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002395 if dep in self.runq_complete and dep not in self.runq_tasksrun:
2396 bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep)
2397 self.failed_tids.append(tid)
2398 self.rq.state = runQueueCleanUp
2399 return
2400
Brad Bishop08902b02019-08-20 09:16:51 -04002401 if dep not in self.runq_complete:
2402 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2403 self.tasks_scenequeue_done.remove(dep)
2404
2405 if tid in self.sq_buildable:
2406 self.sq_buildable.remove(tid)
2407 if tid in self.sq_running:
2408 self.sq_running.remove(tid)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002409 harddepfail = False
2410 for t in self.sqdata.sq_harddeps:
2411 if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
2412 harddepfail = True
2413 break
2414 if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
Brad Bishop08902b02019-08-20 09:16:51 -04002415 if tid not in self.sq_buildable:
2416 self.sq_buildable.add(tid)
2417 if len(self.sqdata.sq_revdeps[tid]) == 0:
2418 self.sq_buildable.add(tid)
2419
2420 if tid in self.sqdata.outrightfail:
2421 self.sqdata.outrightfail.remove(tid)
2422 if tid in self.scenequeue_notcovered:
2423 self.scenequeue_notcovered.remove(tid)
2424 if tid in self.scenequeue_covered:
2425 self.scenequeue_covered.remove(tid)
2426 if tid in self.scenequeue_notneeded:
2427 self.scenequeue_notneeded.remove(tid)
2428
2429 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2430 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2431
2432 if tid in self.stampcache:
2433 del self.stampcache[tid]
2434
2435 if tid in self.build_stamps:
2436 del self.build_stamps[tid]
2437
Andrew Geissler82c905d2020-04-13 13:39:40 -05002438 update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))
2439
2440 if update_tasks:
Brad Bishop08902b02019-08-20 09:16:51 -04002441 self.sqdone = False
Andrew Geissler82c905d2020-04-13 13:39:40 -05002442 update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
2443
2444 for (tid, harddepfail, origvalid) in update_tasks:
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002445 if tid in self.sqdata.valid and not origvalid:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002446 hashequiv_logger.verbose("Setscene task %s became valid" % tid)
2447 if harddepfail:
2448 self.sq_task_failoutright(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002449
2450 if changed:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002451 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002452
Brad Bishop96ff1982019-08-19 13:50:42 -04002453 def scenequeue_updatecounters(self, task, fail=False):
Brad Bishop08902b02019-08-20 09:16:51 -04002454
2455 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002456 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002457 if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered:
2458 # dependency could be already processed, e.g. noexec setscene task
2459 continue
Andrew Geissler3b8a17c2021-04-15 15:55:55 -05002460 noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache)
2461 if noexec or stamppresent:
2462 continue
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002463 logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002464 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002465 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002466 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2467 if dep not in self.sq_buildable:
2468 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002469
Brad Bishop96ff1982019-08-19 13:50:42 -04002470 next = set([task])
2471 while next:
2472 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002473 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002474 self.tasks_scenequeue_done.add(t)
2475 # Look down the dependency chain for non-setscene things which this task depends on
2476 # and mark as 'done'
2477 for dep in self.rqdata.runtaskentries[t].depends:
2478 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2479 continue
2480 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2481 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002482 next = new
2483
Brad Bishopc68388fc2019-08-26 01:33:31 -04002484 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002485
    def sq_task_completeoutright(self, task):
        """
        Mark setscene 'task' as covered (its prebuilt/sstate result can be
        used) and update the scenequeue counters so dependent setscene
        tasks may become buildable.
        """

        logger.debug('Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)
2496
Brad Bishop96ff1982019-08-19 13:50:42 -04002497 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002498 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002499 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002500 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2501 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002502 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2503 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2504 self.rq.state = runQueueCleanUp
2505
Brad Bishop96ff1982019-08-19 13:50:42 -04002506 def sq_task_complete(self, task):
2507 self.sq_stats.taskCompleted()
2508 bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
2509 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002510
Brad Bishop96ff1982019-08-19 13:50:42 -04002511 def sq_task_fail(self, task, result):
2512 self.sq_stats.taskFailed()
2513 bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002514 self.scenequeue_notcovered.add(task)
2515 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002516 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002517
Brad Bishop96ff1982019-08-19 13:50:42 -04002518 def sq_task_failoutright(self, task):
2519 self.sq_running.add(task)
2520 self.sq_buildable.add(task)
2521 self.sq_stats.taskSkipped()
2522 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002523 self.scenequeue_notcovered.add(task)
2524 self.scenequeue_updatecounters(task, True)
2525
Brad Bishop96ff1982019-08-19 13:50:42 -04002526 def sq_task_skip(self, task):
2527 self.sq_running.add(task)
2528 self.sq_buildable.add(task)
2529 self.sq_task_completeoutright(task)
2530 self.sq_stats.taskSkipped()
2531 self.sq_stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002532
Brad Bishop96ff1982019-08-19 13:50:42 -04002533 def sq_build_taskdepdata(self, task):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002534 def getsetscenedeps(tid):
2535 deps = set()
2536 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2537 realtid = tid + "_setscene"
2538 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2539 for (depname, idependtask) in idepends:
2540 if depname not in self.rqdata.taskData[mc].build_targets:
2541 continue
2542
2543 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2544 if depfn is None:
2545 continue
2546 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2547 deps.add(deptid)
2548 return deps
2549
2550 taskdepdata = {}
2551 next = getsetscenedeps(task)
2552 next.add(task)
2553 while next:
2554 additional = []
2555 for revdep in next:
2556 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2557 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2558 deps = getsetscenedeps(revdep)
2559 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2560 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002561 unihash = self.rqdata.runtaskentries[revdep].unihash
2562 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002563 for revdep2 in deps:
2564 if revdep2 not in taskdepdata:
2565 additional.append(revdep2)
2566 next = additional
2567
2568 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2569 return taskdepdata
2570
Brad Bishop96ff1982019-08-19 13:50:42 -04002571 def check_setscenewhitelist(self, tid):
2572 # Check task that is going to run against the whitelist
2573 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2574 # Ignore covered tasks
2575 if tid in self.tasks_covered:
2576 return False
2577 # Ignore stamped tasks
2578 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2579 return False
2580 # Ignore noexec tasks
2581 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2582 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2583 return False
2584
2585 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2586 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2587 if tid in self.rqdata.runq_setscene_tids:
2588 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2589 else:
2590 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002591 for t in self.scenequeue_notcovered:
2592 msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)
Brad Bishop96ff1982019-08-19 13:50:42 -04002593 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2594 return True
2595 return False
2596
class SQData(object):
    """
    Container for the state of the scene (setscene) queue.
    """
    def __init__(self):
        # SceneQueue dependencies
        self.sq_deps = {}
        # SceneQueue reverse dependencies
        self.sq_revdeps = {}
        # Injected inter-setscene task dependencies
        self.sq_harddeps = {}
        # Cache of stamp files so duplicates can't run in parallel
        self.stamps = {}
        # Setscene tasks directly depended upon by the build
        self.unskippable = set()
        # List of setscene tasks which aren't present
        self.outrightfail = set()
        # A list of normal tasks a setscene task covers
        self.sq_covered_tasks = {}
2613
def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
    """
    Populate 'sqdata' with the setscene dependency graph derived from
    the full runqueue graph in 'rqdata'.

    The runqueue dependency tree is collapsed so that only setscene
    tasks remain; each setscene task ends up knowing which setscene
    tasks it depends on (sqdata.sq_deps/sq_revdeps) and which normal
    tasks it covers (sqdata.sq_covered_tasks). Finally the current
    stamp/hash validity state is computed via update_scenequeue_data()
    and a StaleSetSceneTasks event is fired per multiconfig.
    """

    # Working copies: sq_revdeps starts as the full runqueue reverse
    # dependency graph and is consumed as chains are collapsed;
    # sq_revdeps_squash accumulates the collapsed (setscene-only) graph.
    sq_revdeps = {}
    sq_revdeps_squash = {}
    sq_collated_deps = {}

    # We need to construct a dependency graph for the setscene functions. Intermediate
    # dependencies between the setscene tasks only complicate the code. This code
    # therefore aims to collapse the huge runqueue dependency tree into a smaller one
    # only containing the setscene functions.

    rqdata.init_progress_reporter.next_stage()

    # First process the chains up to the first setscene task.
    endpoints = {}
    for tid in rqdata.runtaskentries:
        sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
        sq_revdeps_squash[tid] = set()
        if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint %s" % (tid))
            endpoints[tid] = set()

    rqdata.init_progress_reporter.next_stage()

    # Secondly process the chains between setscene tasks.
    for tid in rqdata.runq_setscene_tids:
        sq_collated_deps[tid] = set()
        #bb.warn("Added endpoint 2 %s" % (tid))
        for dep in rqdata.runtaskentries[tid].depends:
            if tid in sq_revdeps[dep]:
                sq_revdeps[dep].remove(tid)
            if dep not in endpoints:
                endpoints[dep] = set()
            #bb.warn("  Added endpoint 3 %s" % (dep))
            endpoints[dep].add(tid)

    rqdata.init_progress_reporter.next_stage()

    def process_endpoints(endpoints):
        # Walk backwards from each endpoint, propagating the set of
        # setscene tasks ('tasks') that transitively depend on each
        # point; recurses until no new endpoints are exposed.
        newendpoints = {}
        for point, task in endpoints.items():
            tasks = set()
            if task:
                tasks |= task
            if sq_revdeps_squash[point]:
                tasks |= sq_revdeps_squash[point]
            if point not in rqdata.runq_setscene_tids:
                # Record that each of these setscene tasks covers this
                # normal task
                for t in tasks:
                    sq_collated_deps[t].add(point)
            sq_revdeps_squash[point] = set()
            if point in rqdata.runq_setscene_tids:
                # Collapse the accumulated chain onto the setscene task
                # and stop propagating past it
                sq_revdeps_squash[point] = tasks
                tasks = set()
                continue
            for dep in rqdata.runtaskentries[point].depends:
                if point in sq_revdeps[dep]:
                    sq_revdeps[dep].remove(point)
                if tasks:
                    sq_revdeps_squash[dep] |= tasks
                if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
                    newendpoints[dep] = task
        if len(newendpoints) != 0:
            process_endpoints(newendpoints)

    process_endpoints(endpoints)

    rqdata.init_progress_reporter.next_stage()

    # Build a list of tasks which are "unskippable"
    # These are direct endpoints referenced by the build upto and including setscene tasks
    # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
    new = True
    for tid in rqdata.runtaskentries:
        if len(rqdata.runtaskentries[tid].revdeps) == 0:
            sqdata.unskippable.add(tid)
    sqdata.unskippable |= sqrq.cantskip
    while new:
        new = False
        orig = sqdata.unskippable.copy()
        for tid in sorted(orig, reverse=True):
            if tid in rqdata.runq_setscene_tids:
                continue
            if len(rqdata.runtaskentries[tid].depends) == 0:
                # These are tasks which have no setscene tasks in their chain, need to mark as directly buildable
                sqrq.setbuildable(tid)
            sqdata.unskippable |= rqdata.runtaskentries[tid].depends
        if sqdata.unskippable != orig:
            new = True

    sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)

    rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))

    # Sanity check all dependencies could be changed to setscene task references
    for taskcounter, tid in enumerate(rqdata.runtaskentries):
        if tid in rqdata.runq_setscene_tids:
            pass
        elif len(sq_revdeps_squash[tid]) != 0:
            bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
        else:
            del sq_revdeps_squash[tid]
        rqdata.init_progress_reporter.update(taskcounter)

    rqdata.init_progress_reporter.next_stage()

    # Resolve setscene inter-task dependencies
    # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
    # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
    for tid in rqdata.runq_setscene_tids:
        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
        realtid = tid + "_setscene"
        idepends = rqdata.taskData[mc].taskentries[realtid].idepends
        sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
        for (depname, idependtask) in idepends:

            if depname not in rqdata.taskData[mc].build_targets:
                continue

            depfn = rqdata.taskData[mc].build_targets[depname][0]
            if depfn is None:
                continue
            deptid = depfn + ":" + idependtask.replace("_setscene", "")
            if deptid not in rqdata.runtaskentries:
                bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

            if not deptid in sqdata.sq_harddeps:
                sqdata.sq_harddeps[deptid] = set()
            sqdata.sq_harddeps[deptid].add(tid)

            sq_revdeps_squash[tid].add(deptid)
            # Have to zero this to avoid circular dependencies
            sq_revdeps_squash[deptid] = set()

    rqdata.init_progress_reporter.next_stage()

    # Re-add the hard dependencies as reverse dependencies of their targets
    for task in sqdata.sq_harddeps:
        for dep in sqdata.sq_harddeps[task]:
            sq_revdeps_squash[dep].add(task)

    rqdata.init_progress_reporter.next_stage()

    #for tid in sq_revdeps_squash:
    #    data = ""
    #    for dep in sq_revdeps_squash[tid]:
    #        data = data + "\n   %s" % dep
    #    bb.warn("Task %s_setscene: is %s " % (tid, data))

    sqdata.sq_revdeps = sq_revdeps_squash
    sqdata.sq_covered_tasks = sq_collated_deps

    # Build reverse version of revdeps to populate deps structure
    for tid in sqdata.sq_revdeps:
        sqdata.sq_deps[tid] = set()
    for tid in sqdata.sq_revdeps:
        for dep in sqdata.sq_revdeps[tid]:
            sqdata.sq_deps[dep].add(tid)

    rqdata.init_progress_reporter.next_stage()

    # Record which multiconfigs are in play and seed the initially
    # buildable setscene tasks (those with no setscene revdeps)
    sqdata.multiconfigs = set()
    for tid in sqdata.sq_revdeps:
        sqdata.multiconfigs.add(mc_from_tid(tid))
        if len(sqdata.sq_revdeps[tid]) == 0:
            sqrq.sq_buildable.add(tid)

    rqdata.init_progress_reporter.finish()

    sqdata.noexec = set()
    sqdata.stamppresent = set()
    sqdata.valid = set()

    update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)

    # Compute a list of 'stale' sstate tasks where the current hash does not match the one
    # in any stamp files. Pass the list out to metadata as an event.
    found = {}
    for tid in rqdata.runq_setscene_tids:
        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
        stamps = bb.build.find_stale_stamps(taskname, rqdata.dataCaches[mc], taskfn)
        if stamps:
            if mc not in found:
                found[mc] = {}
            found[mc][tid] = stamps
    for mc in found:
        event = bb.event.StaleSetSceneTasks(found[mc])
        bb.event.fire(event, cooker.databuilder.mcdata[mc])
2800
def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False):
    """
    Check the stamp/noexec state for the setscene variant of a task.

    Returns a (noexec, stamppresent) tuple:
      noexec       - True if the task is flagged noexec (no real work needed);
                     a setscene stamp is written to record it as done
      stamppresent - True if a current setscene or normal stamp exists,
                     meaning the task does not need to (re)run
    """
    # NOTE(review): the 'noexecstamp' parameter is accepted but never used in
    # this body - the setscene stamp below is written unconditionally for
    # noexec tasks. Confirm whether make_stamp() should be guarded by it.

    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

    taskdep = rqdata.dataCaches[mc].task_deps[taskfn]

    if 'noexec' in taskdep and taskname in taskdep['noexec']:
        bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
        return True, False

    if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
        logger.debug2('Setscene stamp current for task %s', tid)
        return False, True

    if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
        logger.debug2('Normal stamp current for task %s', tid)
        return False, True

    return False, False
2820
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
    """
    (Re)evaluate the state of the given setscene task ids, updating
    sqdata.noexec/stamppresent/valid/outrightfail and deferring tasks
    that share a pending hash with an already-queued task.
    """

    # Tasks which still need their sstate object validity checked
    tocheck = set()

    for tid in sorted(tids):
        # Clear any previously cached state for this tid before re-evaluating
        if tid in sqdata.stamppresent:
            sqdata.stamppresent.remove(tid)
        if tid in sqdata.valid:
            sqdata.valid.remove(tid)

        noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True)

        if noexec:
            # noexec tasks have nothing to do, skip them outright
            sqdata.noexec.add(tid)
            sqrq.sq_task_skip(tid)
            continue

        if stamppresent:
            # A current stamp means the task's output is already in place
            sqdata.stamppresent.add(tid)
            sqrq.sq_task_skip(tid)
            continue

        tocheck.add(tid)

    # Ask the hash validation mechanism which remaining tasks have usable
    # sstate objects available
    sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)

    sqdata.hashes = {}
    for mc in sorted(sqdata.multiconfigs):
        for tid in sorted(sqdata.sq_revdeps):
            if mc_from_tid(tid) != mc:
                continue
            if tid in sqdata.stamppresent:
                continue
            if tid in sqdata.valid:
                continue
            if tid in sqdata.noexec:
                continue
            if tid in sqrq.scenequeue_notcovered:
                continue
            # No stamp and no valid sstate object: this setscene task is
            # expected to fail outright
            sqdata.outrightfail.add(tid)

            h = pending_hash_index(tid, rqdata)
            if h not in sqdata.hashes:
                sqdata.hashes[h] = tid
            else:
                # Another tid with the same pending hash is already
                # scheduled; defer this one until that completes
                sqrq.sq_deferred[tid] = sqdata.hashes[h]
                bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
Brad Bishop96ff1982019-08-19 13:50:42 -04002868
2869
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails."""

    def __init__(self, x):
        self.args = x
2876
2877
class runQueueExitWait(bb.event.Event):
    """
    Event fired while waiting for task processes to exit
    """

    def __init__(self, remain):
        bb.event.Event.__init__(self)
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
2887
class runQueueEvent(bb.event.Event):
    """
    Base class for runQueue events, recording which task the event
    refers to along with its hash and a snapshot of the stats.
    """
    def __init__(self, task, stats, rq):
        bb.event.Event.__init__(self)
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
2900
class sceneQueueEvent(runQueueEvent):
    """
    Base class for sceneQueue events; overrides the task identity
    fields to refer to the _setscene variant of the task.
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        basename = taskname_from_tid(task)
        self.taskstring = task + "_setscene"
        self.taskname = basename + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002911
class runQueueTaskStarted(runQueueEvent):
    """
    Event fired when a runqueue task starts
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # True when the task is a noexec task being stamped rather than run
        self.noexec = noexec
2919
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event fired when a setscene task starts
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        # True when the task is a noexec task being stamped rather than run
        self.noexec = noexec
2927
2928class runQueueTaskFailed(runQueueEvent):
2929 """
2930 Event notifying a task failed
2931 """
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002932 def __init__(self, task, stats, exitcode, rq, fakeroot_log=None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002933 runQueueEvent.__init__(self, task, stats, rq)
2934 self.exitcode = exitcode
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002935 self.fakeroot_log = fakeroot_log
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002936
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002937 def __str__(self):
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002938 if self.fakeroot_log:
2939 return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log)
2940 else:
2941 return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002942
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2953
class sceneQueueComplete(sceneQueueEvent):
    """
    Event fired when all the sceneQueue tasks are complete.

    Note: deliberately initialises bb.event.Event directly rather than
    going through sceneQueueEvent.__init__ (there is no single task to
    refer to).
    """
    def __init__(self, stats, rq):
        bb.event.Event.__init__(self)
        self.stats = stats.copy()
2961
class runQueueTaskCompleted(runQueueEvent):
    """
    Event fired when a runqueue task completes successfully
    """
2966
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event fired when a setscene task completes successfully
    """
2971
class runQueueTaskSkipped(runQueueEvent):
    """
    Event fired when a runqueue task is skipped, carrying the reason
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason
2979
class taskUniHashUpdate(bb.event.Event):
    """
    Event carrying an updated unihash for a task
    (the original docstring was a copy-paste of runQueueEvent's)
    """
    def __init__(self, task, unihash):
        bb.event.Event.__init__(self)
        self.taskid = task
        self.unihash = unihash
2988
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server.

    Incoming data is a byte stream of pickled payloads framed as
    <event>...</event> and <exitcode>...</exitcode> blocks; read()
    incrementally parses them and dispatches to the runqueue executor.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None):
        self.input = pipein
        if pipeout:
            # The write end belongs to the worker; close our copy
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Buffer of bytes received but not yet parsed
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec
        # Map of task filename -> pseudo log, used to enrich failure reports
        self.fakerootlogs = fakerootlogs

    def setrunqueueexec(self, rqexec):
        self.rqexec = rqexec

    def read(self):
        # First check none of the worker processes have died unexpectedly
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data available on the non-blocking fd
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            # Parse complete <event>...</event> frames; the payload between
            # the tags is a pickled event object (offsets 7 and 8 are
            # len("<event>") and len("</event>"))
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
                    if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e):
                        # The pickled data could contain "</event>" so search for the next occurance
                        # unpickling again, this should be the only way an unpickle error could occur
                        index = self.queue.find(b"</event>", index + 1)
                        continue
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                if isinstance(event, taskUniHashUpdate):
                    self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            # Parse complete <exitcode>...</exitcode> frames containing a
            # pickled (task, status) tuple (offsets 10 and 11 are
            # len("<exitcode>") and len("</exitcode>"))
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                (_, _, _, taskfn) = split_tid_mcfn(task)
                fakerootlog = None
                if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs:
                    fakerootlog = self.fakerootlogs[taskfn]
                self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        # True when any new data was received this call
        return (end > start)

    def close(self):
        # Drain any remaining complete frames before closing
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003064
def get_setscene_enforce_whitelist(d, targets):
    """
    Return the expanded BB_SETSCENE_ENFORCE_WHITELIST as a list, or
    None when setscene enforcement (BB_SETSCENE_ENFORCE) is disabled.

    Entries of the form '%:taskname' are expanded into one
    'target:taskname' entry per build target.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    entries = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    expanded = []
    for entry in entries:
        if entry.startswith('%:'):
            taskname = entry.split(':')[1]
            expanded.extend(target + ':' + taskname for (mc, target, task, fn) in targets)
        else:
            expanded.append(entry)
    return expanded
3077
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname is permitted by the setscene enforcement
    whitelist. A whitelist of None means enforcement is disabled and
    everything is permitted; entries are fnmatch-style patterns.
    """
    import fnmatch
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)
3086 return True