"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#

import copy
import os
import sys
import stat
import errno
import logging
import re
import bb
from bb import msg, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process
import shlex
import pprint

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
hashequiv_logger = logging.getLogger("BitBake.RunQueue.HashEquiv")

__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )

def fn_from_tid(tid):
    return tid.rsplit(":", 1)[0]

def taskname_from_tid(tid):
    return tid.rsplit(":", 1)[1]

def mc_from_tid(tid):
    if tid.startswith('mc:') and tid.count(':') >= 2:
        return tid.split(':')[1]
    return ""

def split_tid(tid):
    (mc, fn, taskname, _) = split_tid_mcfn(tid)
    return (mc, fn, taskname)

def split_mc(n):
    if n.startswith("mc:") and n.count(':') >= 2:
        _, mc, n = n.split(":", 2)
        return (mc, n)
    return ('', n)

def split_tid_mcfn(tid):
    if tid.startswith('mc:') and tid.count(':') >= 2:
        elems = tid.split(':')
        mc = elems[1]
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        mcfn = "mc:" + mc + ":" + fn
    else:
        tid = tid.rsplit(":", 1)
        mc = ""
        fn = tid[0]
        taskname = tid[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)

def build_tid(mc, fn, taskname):
    if mc:
        return "mc:" + mc + ":" + fn + ":" + taskname
    return fn + ":" + taskname

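# Illustrative note (the recipe path below is hypothetical): a plain tid has the
# form "/path/to/recipe.bb:do_compile", while a multiconfig tid carries an "mc:"
# prefix, e.g. "mc:mymc:/path/to/recipe.bb:do_compile". The helpers above
# round-trip these forms:
#   build_tid("mymc", "/path/to/recipe.bb", "do_compile")
#     -> "mc:mymc:/path/to/recipe.bb:do_compile"
#   split_tid_mcfn("mc:mymc:/path/to/recipe.bb:do_compile")
#     -> ("mymc", "/path/to/recipe.bb", "do_compile", "mc:mymc:/path/to/recipe.bb")
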
# Index used to pair up potentially matching multiconfig tasks
# We match on PN, taskname and hash being equal
def pending_hash_index(tid, rqdata):
    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
    pn = rqdata.dataCaches[mc].pkg_fn[taskfn]
    h = rqdata.runtaskentries[tid].unihash
    return pn + ":" + taskname + ":" + h

class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total, setscene_total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.setscene_active = 0
        self.setscene_covered = 0
        self.setscene_notcovered = 0
        self.setscene_total = setscene_total
        self.total = total

    def copy(self):
        obj = self.__class__(self.total, self.setscene_total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self):
        self.active = self.active - 1
        self.completed = self.completed + 1

    def taskSkipped(self):
        self.active = self.active + 1
        self.skipped = self.skipped + 1

    def taskActive(self):
        self.active = self.active + 1

    def updateCovered(self, covered, notcovered):
        self.setscene_covered = covered
        self.setscene_notcovered = notcovered

    def updateActiveSetscene(self, active):
        self.setscene_active = active

# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9

class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = set()
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.add(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Once tasks are running we don't need to worry about them again
        self.buildable.difference_update(self.rq.runq_running)
        buildable = set(self.buildable)
        buildable.difference_update(self.rq.holdoff_tasks)
        buildable.intersection_update(self.rq.tasks_covered | self.rq.tasks_notcovered)
        if not buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(buildable) == 1:
            tid = buildable.pop()
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.add(task)

    def removebuildable(self, task):
        self.buildable.remove(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        weights = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            if weight not in weights:
                weights[weight] = []
            weights[weight].append(tid)

        self.prio_map = []
        for weight in sorted(weights):
            for w in weights[weight]:
                self.prio_map.append(w)

        self.prio_map.reverse()
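        # Rough illustration (hypothetical weights/tids): given weights of
        # {12: ["a:do_x"], 11: ["b:do_y"], 10: ["c:do_z"]}, the loop above builds
        # ["c:do_z", "b:do_y", "a:do_x"] and the reverse() leaves the heaviest
        # task first: ["a:do_x", "b:do_y", "c:do_z"].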

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after another.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)
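        # Rough worked example (hypothetical task names): merging
        # ["do_fetch", "do_unpack", "do_compile"] and then
        # ["do_fetch", "do_configure", "do_compile"] yields
        # ["do_fetch", "do_configure", "do_unpack", "do_compile"]; each input
        # ordering is preserved as a subsequence of the merged list.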

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example of why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's tasks that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')

class RunTaskEntry(object):
    def __init__(self):
        self.depends = set()
        self.revdeps = set()
        self.hash = None
        self.unihash = None
        self.task = None
        self.weight = 1

class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData, targets)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()

    def reset(self):
        self.runtaskentries = {}

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in ids:
            nam = os.path.basename(id)
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_hash(self, tid):
        return self.runtaskentries[tid].hash

    def get_task_unihash(self, tid):
        return self.runtaskentries[tid].unihash

    def get_user_idstring(self, tid, task_name_suffix = ""):
        return tid + task_name_suffix

    def get_short_user_idstring(self, task, task_name_suffix = ""):
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain
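        # Rough illustration (hypothetical tids): chain_reorder(["t3", "t1", "t2"])
        # rotates the chain so the lowest entry leads, returning ["t1", "t2", "t3"],
        # which lets different rotations of the same loop compare as equal below.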
450
451 def chain_compare_equal(chain1, chain2):
452 """
453 Compare two dependency chains and see if they're the same
454 """
455 if len(chain1) != len(chain2):
456 return False
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600457 for index in range(len(chain1)):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500458 if chain1[index] != chain2[index]:
459 return False
460 return True
461
462 def chain_array_contains(chain, chain_array):
463 """
464 Return True if chain_array contains chain
465 """
466 for ch in chain_array:
467 if chain_compare_equal(ch, chain):
468 return True
469 return False
470
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600471 def find_chains(tid, prev_chain):
472 prev_chain.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500473 total_deps = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600474 total_deps.extend(self.runtaskentries[tid].revdeps)
475 for revdep in self.runtaskentries[tid].revdeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500476 if revdep in prev_chain:
477 idx = prev_chain.index(revdep)
478 # To prevent duplicates, reorder the chain to start with the lowest taskid
479 # and search through an array of those we've already printed
480 chain = prev_chain[idx:]
481 new_chain = chain_reorder(chain)
482 if not chain_array_contains(new_chain, valid_chains):
483 valid_chains.append(new_chain)
484 msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
485 for dep in new_chain:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600486 msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500487 msgs.append("\n")
488 if len(valid_chains) > 10:
489 msgs.append("Aborted dependency loops search after 10 matches.\n")
Andrew Geissler99467da2019-02-25 18:54:23 -0600490 raise TooManyLoops
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500491 continue
492 scan = False
493 if revdep not in explored_deps:
494 scan = True
495 elif revdep in explored_deps[revdep]:
496 scan = True
497 else:
498 for dep in prev_chain:
499 if dep in explored_deps[revdep]:
500 scan = True
501 if scan:
502 find_chains(revdep, copy.deepcopy(prev_chain))
503 for dep in explored_deps[revdep]:
504 if dep not in total_deps:
505 total_deps.append(dep)
506
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600507 explored_deps[tid] = total_deps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500508
Andrew Geissler99467da2019-02-25 18:54:23 -0600509 try:
510 for task in tasks:
511 find_chains(task, [])
512 except TooManyLoops:
513 pass
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500514
515 return msgs
516
517 def calculate_task_weights(self, endpoints):
518 """
519 Calculate a number representing the "weight" of each task. Heavier weighted tasks
520 have more dependencies and hence should be executed sooner for maximum speed.
521
522 This function also sanity checks the task list finding tasks that are not
523 possible to execute due to circular dependencies.
524 """
525
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600526 numTasks = len(self.runtaskentries)
527 weight = {}
528 deps_left = {}
529 task_done = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500530
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600531 for tid in self.runtaskentries:
532 task_done[tid] = False
533 weight[tid] = 1
534 deps_left[tid] = len(self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500535
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600536 for tid in endpoints:
537 weight[tid] = 10
538 task_done[tid] = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500539
540 while True:
541 next_points = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600542 for tid in endpoints:
543 for revdep in self.runtaskentries[tid].depends:
544 weight[revdep] = weight[revdep] + weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500545 deps_left[revdep] = deps_left[revdep] - 1
546 if deps_left[revdep] == 0:
547 next_points.append(revdep)
548 task_done[revdep] = True
549 endpoints = next_points
550 if len(next_points) == 0:
551 break
552
553 # Circular dependency sanity check
554 problem_tasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600555 for tid in self.runtaskentries:
556 if task_done[tid] is False or deps_left[tid] != 0:
557 problem_tasks.append(tid)
Andrew Geisslerd1e89492021-02-12 15:35:20 -0600558 logger.debug2("Task %s is not buildable", tid)
559 logger.debug2("(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600560 self.runtaskentries[tid].weight = weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500561
562 if problem_tasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600563 message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500564 message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
565 message = message + "Identifying dependency loops (this may take a short while)...\n"
566 logger.error(message)
567
568 msgs = self.circular_depchains_handler(problem_tasks)
569
570 message = "\n"
571 for msg in msgs:
572 message = message + msg
573 bb.msg.fatal("RunQueue", message)
574
575 return weight
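        # Rough illustration (hypothetical chain): if c depends on b and b depends
        # on a, and nothing depends on c, then c is the endpoint and the loop above
        # gives weight[c] = 10, weight[b] = 1 + 10 = 11 and weight[a] = 1 + 11 = 12,
        # so tasks with more (transitive) dependents weigh more and run earlier.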

    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = {}
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        found = False
        for mc in self.taskData:
            if len(taskData[mc].taskentries) > 0:
                found = True
                break
        if not found:
            # Nothing to do
            return 0

        self.init_progress_reporter.start()
        self.init_progress_reporter.next_stage()

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        def add_build_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
                    continue
                depdata = taskData[mc].build_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_runtime_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
                    continue
                depdata = taskData[mc].run_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_mc_dependencies(mc, tid):
            mcdeps = taskData[mc].get_mcdepends()
            for dep in mcdeps:
                mcdependency = dep.split(':')
                pn = mcdependency[3]
                frommc = mcdependency[1]
                mcdep = mcdependency[2]
                deptask = mcdependency[4]
                if mc == frommc:
                    fn = taskData[mcdep].build_targets[pn][0]
                    newdep = '%s:%s' % (fn, deptask)
                    taskData[mc].taskentries[tid].tdepends.append(newdep)

        for mc in taskData:
            for tid in taskData[mc].taskentries:

                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                #runtid = build_tid(mc, fn, taskname)

                #logger.debug2("Processing %s,%s:%s", mc, fn, taskname)

                depends = set()
                task_deps = self.dataCaches[mc].task_deps[taskfn]

                self.runtaskentries[tid] = RunTaskEntry()
663
664 if fn in taskData[mc].failed_fns:
665 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500666
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800667 # We add multiconfig dependencies before processing internal task deps (tdepends)
668 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
669 add_mc_dependencies(mc, tid)
670
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500671 # Resolve task internal dependencies
672 #
673 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600674 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800675 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
676 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500677
678 # Resolve 'deptask' dependencies
679 #
680 # e.g. do_sometask[deptask] = "do_someothertask"
681 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600682 if 'deptask' in task_deps and taskname in task_deps['deptask']:
683 tasknames = task_deps['deptask'][taskname].split()
684 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500685
686 # Resolve 'rdeptask' dependencies
687 #
688 # e.g. do_sometask[rdeptask] = "do_someothertask"
689 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600690 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
691 tasknames = task_deps['rdeptask'][taskname].split()
692 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500693
694 # Resolve inter-task dependencies
695 #
696 # e.g. do_sometask[depends] = "targetname:do_someothertask"
697 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600698 idepends = taskData[mc].taskentries[tid].idepends
699 for (depname, idependtask) in idepends:
700 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500701 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600702 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500703 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600704 t = depdata + ":" + idependtask
705 depends.add(t)
706 if t not in taskData[mc].taskentries:
707 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
708 irdepends = taskData[mc].taskentries[tid].irdepends
709 for (depname, idependtask) in irdepends:
710 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500711 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500712 if not taskData[mc].run_targets[depname]:
713 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600714 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500715 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600716 t = depdata + ":" + idependtask
717 depends.add(t)
718 if t not in taskData[mc].taskentries:
719 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500720
721 # Resolve recursive 'recrdeptask' dependencies (Part A)
722 #
723 # e.g. do_sometask[recrdeptask] = "do_someothertask"
724 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
725 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600726 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
727 tasknames = task_deps['recrdeptask'][taskname].split()
728 recursivetasks[tid] = tasknames
729 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
730 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
731 if taskname in tasknames:
732 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500733
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600734 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
735 recursiveitasks[tid] = []
736 for t in task_deps['recideptask'][taskname].split():
737 newdep = build_tid(mc, fn, t)
738 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500739
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600740 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400741 # Remove all self references
742 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500743
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600744 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500745
        self.init_progress_reporter.next_stage()

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed

        # Generating/iterating recursive lists of dependencies is painful and potentially slow
        # Precompute recursive task dependencies here by:
        # a) create a temp list of reverse dependencies (revdeps)
        # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
        # c) combine the total list of dependencies in cumulativedeps
        # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)


        revdeps = {}
        deps = {}
        cumulativedeps = {}
        for tid in self.runtaskentries:
            deps[tid] = set(self.runtaskentries[tid].depends)
            revdeps[tid] = set()
            cumulativedeps[tid] = set()
        # Generate a temp list of reverse dependencies
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                revdeps[dep].add(tid)
        # Find the dependency chain endpoints
        endpoints = set()
        for tid in self.runtaskentries:
            if len(deps[tid]) == 0:
                endpoints.add(tid)
        # Iterate the chains collating dependencies
        while endpoints:
            next = set()
            for tid in endpoints:
                for dep in revdeps[tid]:
                    cumulativedeps[dep].add(fn_from_tid(tid))
                    cumulativedeps[dep].update(cumulativedeps[tid])
                    if tid in deps[dep]:
                        deps[dep].remove(tid)
                    if len(deps[dep]) == 0:
                        next.add(dep)
            endpoints = next
        #for tid in deps:
        #    if len(deps[tid]) != 0:
        #        bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))

        # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
        # resolve these recursively until we aren't adding any further extra dependencies
        extradeps = True
        while extradeps:
            extradeps = 0
            for tid in recursivetasks:
                tasknames = recursivetasks[tid]

                totaldeps = set(self.runtaskentries[tid].depends)
                if tid in recursiveitasks:
                    totaldeps.update(recursiveitasks[tid])
                    for dep in recursiveitasks[tid]:
                        if dep not in self.runtaskentries:
                            continue
                        totaldeps.update(self.runtaskentries[dep].depends)

                deps = set()
                for dep in totaldeps:
                    if dep in cumulativedeps:
                        deps.update(cumulativedeps[dep])

                for t in deps:
                    for taskname in tasknames:
                        newtid = t + ":" + taskname
                        if newtid == tid:
                            continue
                        if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

                # Handle recursive tasks which depend upon other recursive tasks
                deps = set()
                for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
                    deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
                for newtid in deps:
                    for taskname in tasknames:
                        if not newtid.endswith(":" + taskname):
                            continue
                        if newtid in self.runtaskentries:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

            bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)

        # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for tid in recursivetasksselfref:
            self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)

        self.init_progress_reporter.next_stage()

        #self.dump_data()

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)

        def invalidate_task(tid, error_nostamp):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)

        self.target_tids = []
        for (mc, target, task, fn) in self.targets:

            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
                continue

            if target in taskData[mc].failed_deps:
                continue

            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            if fn in taskData[mc].failed_fns:
                continue

            # fn already has mc prefix
            tid = fn + ":" + task
            self.target_tids.append(tid)
            if tid not in taskData[mc].taskentries:
                import difflib
                tasks = []
                for x in taskData[mc].taskentries:
                    if x.startswith(fn + ":"):
                        tasks.append(taskname_from_tid(x))
                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n %s" % "\n ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))

            # For tasks called "XXXX-", only run their dependencies
            if parents:
                for i in self.runtaskentries[tid].depends:
                    mark_active(i, 1)
            else:
                mark_active(tid, 1)

        self.init_progress_reporter.next_stage()

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        delcount = {}
        for tid in list(self.runtaskentries.keys()):
            if tid not in runq_build:
                delcount[tid] = self.runtaskentries[tid]
                del self.runtaskentries[tid]

        # Handle --runall
        if self.cooker.configuration.runall:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runall:
                if not task.startswith("do_"):
                    task = "do_{0}".format(task)
                runall_tids = set()
                for tid in list(self.runtaskentries):
                    wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
                    if wanttid in delcount:
                        self.runtaskentries[wanttid] = delcount[wanttid]
                    if wanttid in self.runtaskentries:
                        runall_tids.add(wanttid)

                for tid in list(runall_tids):
                    mark_active(tid, 1)
                    if self.cooker.configuration.force:
                        invalidate_task(tid, False)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))

        self.init_progress_reporter.next_stage()

        # Handle runonly
        if self.cooker.configuration.runonly:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runonly:
                if not task.startswith("do_"):
                    task = "do_{0}".format(task)
                runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task }

                for tid in list(runonly_tids):
                    mark_active(tid, 1)
                    if self.cooker.configuration.force:
                        invalidate_task(tid, False)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runtaskentries) == 0:
            if not taskData[''].abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))

        logger.verbose("Assign Weightings")

        self.init_progress_reporter.next_stage()

        # Generate a list of reverse dependencies to ease future calculations
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                self.runtaskentries[dep].revdeps.add(tid)

        self.init_progress_reporter.next_stage()

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for tid in self.runtaskentries:
            revdeps = self.runtaskentries[tid].revdeps
            if len(revdeps) == 0:
                endpoints.append(tid)
            for dep in revdeps:
                if dep in self.runtaskentries[tid].depends:
                    bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))


        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        self.init_progress_reporter.next_stage()

        # Calculate task weights
        # Check for longer circular dependency chains
        self.runq_weight = self.calculate_task_weights(endpoints)

        self.init_progress_reporter.next_stage()

        # Sanity Check - Check for multiple tasks building the same provider
        for mc in self.dataCaches:
            prov_list = {}
            seen_fn = []
            for tid in self.runtaskentries:
                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                if taskfn in seen_fn:
                    continue
                if mc != tidmc:
                    continue
                seen_fn.append(taskfn)
                for prov in self.dataCaches[mc].fn_provides[taskfn]:
                    if prov not in prov_list:
                        prov_list[prov] = [taskfn]
                    elif taskfn not in prov_list[prov]:
                        prov_list[prov].append(taskfn)
            for prov in prov_list:
                if len(prov_list[prov]) < 2:
                    continue
                if prov in self.multi_provider_whitelist:
                    continue
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCaches[mc].pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for tid in self.runtaskentries:
                        fn = fn_from_tid(tid)
                        if fn != provfn:
                            continue
                        for dep in self.runtaskentries[tid].revdeps:
                            fn = fn_from_tid(dep)
                            if fn == provfn:
                                continue
                            deps.add(dep)
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCaches[mc].fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCaches[mc].rproviders:
                        if provfn in self.dataCaches[mc].rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCaches[mc].packages:
                        if provfn in self.dataCaches[mc].packages[package]:
                            rprovides.add(package)
                    for package in self.dataCaches[mc].packages_dynamic:
                        if provfn in self.dataCaches[mc].packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
                #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.verbnote(msg)
                else:
                    logger.error(msg)

        self.init_progress_reporter.next_stage()

        # Create a whitelist usable by the stamp checks
        self.stampfnwhitelist = {}
        for mc in self.taskData:
            self.stampfnwhitelist[mc] = []
            for entry in self.stampwhitelist.split():
                if entry not in self.taskData[mc].build_targets:
                    continue
                fn = self.taskData[mc].build_targets[entry][0]
                self.stampfnwhitelist[mc].append(fn)

        self.init_progress_reporter.next_stage()

1146 # Iterate over the task list looking for tasks with a 'setscene' function
Andrew Geissler82c905d2020-04-13 13:39:40 -05001147 self.runq_setscene_tids = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001148 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001149 for tid in self.runtaskentries:
1150 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001151 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001152 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001153 continue
Andrew Geissler82c905d2020-04-13 13:39:40 -05001154 self.runq_setscene_tids.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001155
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001156 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001157
1158 # Invalidate task if force mode active
1159 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001160 for tid in self.target_tids:
1161 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001162
1163 # Invalidate task if invalidate mode active
1164 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001165 for tid in self.target_tids:
1166 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001167 for st in self.cooker.configuration.invalidate_stamp.split(','):
1168 if not st.startswith("do_"):
1169 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001170 invalidate_task(fn + ":" + st, True)
1171
1172 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001173
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001174 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001175 for mc in taskData:
1176 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1177 virtpnmap = {}
1178 for v in virtmap:
1179 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1180 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1181 if hasattr(bb.parse.siggen, "tasks_resolved"):
1182 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1183
1184 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001185
Brad Bishop00e122a2019-10-05 11:10:57 -04001186 bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)
1187
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001188 # Iterate over the task list and call into the siggen code
1189 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001190 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001191 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001192 for tid in todeal.copy():
1193 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1194 dealtwith.add(tid)
1195 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001196 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001197
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001198 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001199
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001200 #self.dump_data()
1201 return len(self.runtaskentries)
1202
Brad Bishop19323692019-04-05 15:28:33 -04001203 def prepare_task_hash(self, tid):
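# Computes the taskhash for this tid from its dependencies, then records the
# unihash. The unihash starts out equal to the taskhash; with hash
# equivalence enabled (a hash server configured) get_unihash() may instead
# return a previously recorded hash for an equivalent task output.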
Andrew Geissler5a43b432020-06-13 10:46:56 -05001204 dc = bb.parse.siggen.get_data_caches(self.dataCaches, mc_from_tid(tid))
1205 bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, dc)
1206 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, dc)
Brad Bishop08902b02019-08-20 09:16:51 -04001207 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001208
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001209 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001210 """
1211 Dump some debug information on the internal data structures
1212 """
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001213 logger.debug3("run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001214 for tid in self.runtaskentries:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001215 logger.debug3(" %s: %s Deps %s RevDeps %s", tid,
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001216 self.runtaskentries[tid].weight,
1217 self.runtaskentries[tid].depends,
1218 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001219
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001220class RunQueueWorker():
1221 def __init__(self, process, pipe):
1222 self.process = process
1223 self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001224
1225class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001226 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001227
1228 self.cooker = cooker
1229 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001230 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001231
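# Configuration hooks from the metadata: BB_STAMP_POLICY controls how stamps
# are compared, while BB_HASHCHECK_FUNCTION and BB_SETSCENE_DEPVALID name
# metadata functions used later for setscene hash checking and setscene
# dependency validation.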
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001232 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1233 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001234 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001235
1236 self.state = runQueuePrepare
1237
1238 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001239 # Invoked at regular time intervals via the bitbake heartbeat event
1240 # while the build is running. We generate a unique name for the handler
1241 # here, just in case there is ever more than one RunQueue instance,
Brad Bishop96ff1982019-08-19 13:50:42 -04001242 # start the handler when reaching runQueueSceneInit, and stop it when
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001243 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001244 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001245 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1246 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001247 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001248 self.worker = {}
1249 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001250
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001251 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001252 logger.debug("Starting bitbake-worker")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001253 magic = "decafbad"
1254 if self.cooker.configuration.profile:
1255 magic = "decafbadbad"
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001256 fakerootlogs = None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001257 if fakeroot:
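# Fakeroot workers run under the metadata's FAKEROOTCMD (typically pseudo in
# OE-Core) with FAKEROOTBASEENV merged into their environment; the per-recipe
# fakeroot log locations are kept so task failures can quote them.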
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001258 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001259 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001260 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001261 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001262 env = os.environ.copy()
1263 for key, value in (var.split('=') for var in fakerootenv):
1264 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001265 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001266 fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001267 else:
1268 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1269 bb.utils.nonblockingfd(worker.stdout)
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001270 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001271
1272 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001273 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1274 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1275 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1276 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001277 "sigdata" : bb.parse.siggen.get_taskdata(),
Andrew Geissler82c905d2020-04-13 13:39:40 -05001278 "logdefaultlevel" : bb.msg.loggerDefaultLogLevel,
Andrew Geisslerc9f78652020-09-18 14:11:35 -05001279 "build_verbose_shell" : self.cooker.configuration.build_verbose_shell,
1280 "build_verbose_stdout" : self.cooker.configuration.build_verbose_stdout,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001281 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1282 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001283 "buildname" : self.cfgData.getVar("BUILDNAME"),
1284 "date" : self.cfgData.getVar("DATE"),
1285 "time" : self.cfgData.getVar("TIME"),
Brad Bishopa34c0302019-09-23 22:34:48 -04001286 "hashservaddr" : self.cooker.hashservaddr,
Andrew Geissler9b4d8b02021-02-19 12:26:16 -06001287 "umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001288 }
1289
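# Hand the configuration to bitbake-worker: each payload is pickled and
# framed with simple <tag>...</tag> markers so the worker can split the
# message stream it reads on stdin.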
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001290 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001291 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001292 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001293 worker.stdin.flush()
1294
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001295 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001296
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001297 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001298 if not worker:
1299 return
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001300 logger.debug("Teardown for bitbake-worker")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001301 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001302 worker.process.stdin.write(b"<quit></quit>")
1303 worker.process.stdin.flush()
1304 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001305 except IOError:
1306 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001307 while worker.process.returncode is None:
1308 worker.pipe.read()
1309 worker.process.poll()
1310 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001311 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001312 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001313
1314 def start_worker(self):
1315 if self.worker:
1316 self.teardown_workers()
1317 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001318 for mc in self.rqdata.dataCaches:
1319 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001320
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001321 def start_fakeworker(self, rqexec, mc):
1322 if not mc in self.fakeworker:
1323 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001324
1325 def teardown_workers(self):
1326 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001327 for mc in self.worker:
1328 self._teardown_worker(self.worker[mc])
1329 self.worker = {}
1330 for mc in self.fakeworker:
1331 self._teardown_worker(self.fakeworker[mc])
1332 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001333
1334 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001335 for mc in self.worker:
1336 self.worker[mc].pipe.read()
1337 for mc in self.fakeworker:
1338 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001339
1340 def active_fds(self):
1341 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001342 for mc in self.worker:
1343 fds.append(self.worker[mc].pipe.input)
1344 for mc in self.fakeworker:
1345 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001346 return fds
1347
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001348 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
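# Returns True when the stamp for this task can be treated as current: the
# stamp file must exist, the task must not be 'nostamp', and (subject to
# BB_STAMP_POLICY) it must be no older than the stamps of its dependencies.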
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001349 def get_timestamp(f):
1350 try:
1351 if not os.access(f, os.F_OK):
1352 return None
1353 return os.stat(f)[stat.ST_MTIME]
1354 except:
1355 return None
1356
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001357 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1358 if taskname is None:
1359 taskname = tn
1360
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001361 if self.stamppolicy == "perfile":
1362 fulldeptree = False
1363 else:
1364 fulldeptree = True
1365 stampwhitelist = []
1366 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001367 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001368
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001369 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001370
1371 # If the stamp is missing, it's not current
1372 if not os.access(stampfile, os.F_OK):
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001373 logger.debug2("Stampfile %s not available", stampfile)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001374 return False
1375 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001376 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001377 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001378 logger.debug2("%s.%s is nostamp\n", fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001379 return False
1380
1381 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1382 return True
1383
1384 if cache is None:
1385 cache = {}
1386
1387 iscurrent = True
1388 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001389 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001390 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001391 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1392 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1393 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001394 t2 = get_timestamp(stampfile2)
1395 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001396 if t3 and not t2:
1397 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001398 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001399 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001400 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1401 if not t2:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001402 logger.debug2('Stampfile %s does not exist', stampfile2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001403 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001404 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001405 if t1 < t2:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001406 logger.debug2('Stampfile %s < %s', stampfile, stampfile2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001407 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001408 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001409 if recurse and iscurrent:
1410 if dep in cache:
1411 iscurrent = cache[dep]
1412 if not iscurrent:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001413 logger.debug2('Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001414 else:
1415 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1416 cache[dep] = iscurrent
1417 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001418 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001419 return iscurrent
1420
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001421 def validate_hashes(self, tocheck, data, currentcount=0, siginfo=False, summary=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04001422 valid = set()
1423 if self.hashvalidate:
Brad Bishop08902b02019-08-20 09:16:51 -04001424 sq_data = {}
1425 sq_data['hash'] = {}
1426 sq_data['hashfn'] = {}
1427 sq_data['unihash'] = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04001428 for tid in tocheck:
1429 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04001430 sq_data['hash'][tid] = self.rqdata.runtaskentries[tid].hash
1431 sq_data['hashfn'][tid] = self.rqdata.dataCaches[mc].hashfn[taskfn]
1432 sq_data['unihash'][tid] = self.rqdata.runtaskentries[tid].unihash
Brad Bishop96ff1982019-08-19 13:50:42 -04001433
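# sq_data holds one dict per field, each keyed by tid: the taskhash, the
# unihash and the datacache's hashfn entry, which the metadata's hash check
# function can use to locate candidate artefacts.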
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001434 valid = self.validate_hash(sq_data, data, siginfo, currentcount, summary)
Brad Bishop96ff1982019-08-19 13:50:42 -04001435
1436 return valid
1437
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001438 def validate_hash(self, sq_data, d, siginfo, currentcount, summary):
1439 locs = {"sq_data" : sq_data, "d" : d, "siginfo" : siginfo, "currentcount" : currentcount, "summary" : summary}
Brad Bishop19323692019-04-05 15:28:33 -04001440
Brad Bishop08902b02019-08-20 09:16:51 -04001441 # Metadata has **kwargs so args can be added, sq_data can also gain new fields
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001442 call = self.hashvalidate + "(sq_data, d, siginfo=siginfo, currentcount=currentcount, summary=summary)"
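# In OE-Core this indirection typically resolves to sstate_checkhashes() in
# sstate.bbclass, which returns the set of tids whose sstate objects appear
# to be available.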
Brad Bishop19323692019-04-05 15:28:33 -04001443
Brad Bishop19323692019-04-05 15:28:33 -04001444 return bb.utils.better_eval(call, locs)
1445
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001446 def _execute_runqueue(self):
1447 """
1448 Run the tasks in a queue prepared by rqdata.prepare()
1449 Upon failure, optionally try to recover the build using any alternate providers
1450 (if the abort on failure configuration option isn't set)
1451 """
1452
1453 retval = True
1454
1455 if self.state is runQueuePrepare:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001456 # NOTE: if you add, remove or significantly refactor the stages of this
1457 # process then you should recalculate the weightings here. This is quite
1458 # easy to do - just change the next line temporarily to pass debug=True as
1459 # the last parameter and you'll get a printout of the weightings as well
1460 # as a map to the lines where next_stage() was called. Of course this isn't
1461 # critical, but it helps to keep the progress reporting accurate.
1462 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1463 "Initialising tasks",
1464 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001465 if self.rqdata.prepare() == 0:
1466 self.state = runQueueComplete
1467 else:
1468 self.state = runQueueSceneInit
Brad Bishop00e122a2019-10-05 11:10:57 -04001469 bb.parse.siggen.save_unitaskhashes()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001470
1471 if self.state is runQueueSceneInit:
Brad Bishop96ff1982019-08-19 13:50:42 -04001472 self.rqdata.init_progress_reporter.next_stage()
1473
1474 # we are ready to run, emit dependency info to any UI or class which
1475 # needs it
1476 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1477 self.rqdata.init_progress_reporter.next_stage()
1478 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
1479
Brad Bishope2d5b612018-11-23 10:55:50 +13001480 if not self.dm_event_handler_registered:
1481 res = bb.event.register(self.dm_event_handler_name,
Brad Bishop96ff1982019-08-19 13:50:42 -04001482 lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
Andrew Geissler9b4d8b02021-02-19 12:26:16 -06001483 ('bb.event.HeartbeatEvent',), data=self.cfgData)
Brad Bishope2d5b612018-11-23 10:55:50 +13001484 self.dm_event_handler_registered = True
1485
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001486 dump = self.cooker.configuration.dump_signatures
1487 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001488 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001489 if 'printdiff' in dump:
1490 invalidtasks = self.print_diffscenetasks()
1491 self.dump_signatures(dump)
1492 if 'printdiff' in dump:
1493 self.write_diffscenetasks(invalidtasks)
1494 self.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001495
Brad Bishop96ff1982019-08-19 13:50:42 -04001496 if self.state is runQueueSceneInit:
1497 self.rqdata.init_progress_reporter.next_stage()
1498 self.start_worker()
1499 self.rqdata.init_progress_reporter.next_stage()
1500 self.rqexe = RunQueueExecute(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001501
Brad Bishop96ff1982019-08-19 13:50:42 -04001502 # If we don't have any setscene functions, skip execution
1503 if len(self.rqdata.runq_setscene_tids) == 0:
1504 logger.info('No setscene tasks')
1505 for tid in self.rqdata.runtaskentries:
1506 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1507 self.rqexe.setbuildable(tid)
1508 self.rqexe.tasks_notcovered.add(tid)
1509 self.rqexe.sqdone = True
1510 logger.info('Executing Tasks')
1511 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001512
1513 if self.state is runQueueRunning:
1514 retval = self.rqexe.execute()
1515
1516 if self.state is runQueueCleanUp:
1517 retval = self.rqexe.finish()
1518
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001519 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1520
1521 if build_done and self.dm_event_handler_registered:
Andrew Geissler9b4d8b02021-02-19 12:26:16 -06001522 bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001523 self.dm_event_handler_registered = False
1524
1525 if build_done and self.rqexe:
Brad Bishop08902b02019-08-20 09:16:51 -04001526 bb.parse.siggen.save_unitaskhashes()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001527 self.teardown_workers()
Brad Bishop96ff1982019-08-19 13:50:42 -04001528 if self.rqexe:
1529 if self.rqexe.stats.failed:
1530 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1531 else:
1532 # Let's avoid the word "failed" if nothing actually did
1533 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001534
1535 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001536 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001537
1538 if self.state is runQueueComplete:
1539 # All done
1540 return False
1541
1542 # Loop
1543 return retval
1544
1545 def execute_runqueue(self):
1546 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1547 try:
1548 return self._execute_runqueue()
1549 except bb.runqueue.TaskFailure:
1550 raise
1551 except SystemExit:
1552 raise
1553 except bb.BBHandledException:
1554 try:
1555 self.teardown_workers()
1556 except:
1557 pass
1558 self.state = runQueueComplete
1559 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001560 except Exception as err:
1561 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001562 try:
1563 self.teardown_workers()
1564 except:
1565 pass
1566 self.state = runQueueComplete
1567 raise
1568
1569 def finish_runqueue(self, now = False):
1570 if not self.rqexe:
1571 self.state = runQueueComplete
1572 return
1573
1574 if now:
1575 self.rqexe.finish_now()
1576 else:
1577 self.rqexe.finish()
1578
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001579 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001580 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Andrew Geissler5a43b432020-06-13 10:46:56 -05001581 mc = bb.runqueue.mc_from_tid(fn)
1582 the_data = bb_cache.loadDataFull(fn, self.cooker.collections[mc].get_file_appends(fn))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001583 siggen = bb.parse.siggen
1584 dataCaches = self.rqdata.dataCaches
1585 siggen.dump_sigfn(fn, dataCaches, options)
1586
1587 def dump_signatures(self, options):
1588 fns = set()
1589 bb.note("Reparsing files to collect dependency data")
1590
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001591 for tid in self.rqdata.runtaskentries:
1592 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001593 fns.add(fn)
1594
1595 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1596 # We cannot use the real multiprocessing.Pool easily due to some local data
1597 # that can't be pickled. This is a cheap multi-process solution.
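# Up to max_process Process objects run rq_dump_sigfn() in parallel; finished
# children are reaped via is_alive() and any stragglers joined once every
# file has been dispatched.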
1598 launched = []
1599 while fns:
1600 if len(launched) < max_process:
1601 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1602 p.start()
1603 launched.append(p)
1604 for q in launched:
1605 # The finished processes are joined when calling is_alive()
1606 if not q.is_alive():
1607 launched.remove(q)
1608 for p in launched:
1609 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001610
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001611 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001612
1613 return
1614
1615 def print_diffscenetasks(self):
1616
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001617 noexec = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001618 tocheck = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001619
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001620 for tid in self.rqdata.runtaskentries:
1621 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1622 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001623
1624 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001625 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001626 continue
1627
Brad Bishop96ff1982019-08-19 13:50:42 -04001628 tocheck.add(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001629
Brad Bishop1d80a2e2019-11-15 16:35:03 -05001630 valid_new = self.validate_hashes(tocheck, self.cooker.data, 0, True, summary=False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001631
1632 # Tasks which are both setscene and noexec never care about dependencies
1633 # We therefore find tasks which are setscene and noexec and mark their
1634 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001635 for tid in noexec:
1636 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001637 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001638 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001639 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001640 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1641 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001642 continue
1643 hasnoexecparents = False
1644 break
1645 if hasnoexecparents:
1646 valid_new.add(dep)
1647
1648 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001649 for tid in self.rqdata.runtaskentries:
1650 if tid not in valid_new and tid not in noexec:
1651 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001652
1653 found = set()
1654 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001655 for tid in invalidtasks:
1656 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001657 while toprocess:
1658 next = set()
1659 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001660 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001661 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001662 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001663 if dep not in processed:
1664 processed.add(dep)
1665 next.add(dep)
1666 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001667 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001668 toprocess = set()
1669
1670 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001671 for tid in invalidtasks.difference(found):
1672 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001673
1674 if tasklist:
1675 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1676
1677 return invalidtasks.difference(found)
1678
1679 def write_diffscenetasks(self, invalidtasks):
1680
1681 # Define recursion callback
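# compare_sigfiles() invokes this callback for each differing dependency hash
# pair, so the report recursively expands why nested sigdata differs.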
1682 def recursecb(key, hash1, hash2):
1683 hashes = [hash1, hash2]
1684 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1685
1686 recout = []
1687 if len(hashfiles) == 2:
1688 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
Brad Bishopc342db32019-05-15 21:57:59 -04001689 recout.extend(list(' ' + l for l in out2))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001690 else:
1691 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1692
1693 return recout
1694
1695
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001696 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001697 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1698 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001699 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001700 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1701 match = None
1702 for m in matches:
1703 if h in m:
1704 match = m
1705 if match is None:
1706 bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001707 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001708 if matches:
1709 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
Brad Bishop19323692019-04-05 15:28:33 -04001710 prevh = __find_sha256__.search(latestmatch).group(0)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001711 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1712 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1713
Brad Bishop96ff1982019-08-19 13:50:42 -04001714
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001715class RunQueueExecute:
1716
1717 def __init__(self, rq):
1718 self.rq = rq
1719 self.cooker = rq.cooker
1720 self.cfgData = rq.cfgData
1721 self.rqdata = rq.rqdata
1722
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001723 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1724 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001725
Brad Bishop96ff1982019-08-19 13:50:42 -04001726 self.sq_buildable = set()
1727 self.sq_running = set()
1728 self.sq_live = set()
1729
Brad Bishop08902b02019-08-20 09:16:51 -04001730 self.updated_taskhash_queue = []
1731 self.pending_migrations = set()
1732
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001733 self.runq_buildable = set()
1734 self.runq_running = set()
1735 self.runq_complete = set()
Andrew Geissler82c905d2020-04-13 13:39:40 -05001736 self.runq_tasksrun = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001737
1738 self.build_stamps = {}
1739 self.build_stamps2 = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001740 self.failed_tids = []
Brad Bishop96ff1982019-08-19 13:50:42 -04001741 self.sq_deferred = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001742
1743 self.stampcache = {}
1744
Brad Bishop08902b02019-08-20 09:16:51 -04001745 self.holdoff_tasks = set()
Brad Bishopc68388fc2019-08-26 01:33:31 -04001746 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04001747 self.sqdone = False
1748
Andrew Geissler5199d832021-09-24 16:47:35 -05001749 self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids))
Brad Bishop96ff1982019-08-19 13:50:42 -04001750
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001751 for mc in rq.worker:
1752 rq.worker[mc].pipe.setrunqueueexec(self)
1753 for mc in rq.fakeworker:
1754 rq.fakeworker[mc].pipe.setrunqueueexec(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001755
1756 if self.number_tasks <= 0:
1757 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1758
Brad Bishop96ff1982019-08-19 13:50:42 -04001759 # List of setscene tasks which we've covered
1760 self.scenequeue_covered = set()
1761 # List of tasks which are covered (including setscene ones)
1762 self.tasks_covered = set()
1763 self.tasks_scenequeue_done = set()
1764 self.scenequeue_notcovered = set()
1765 self.tasks_notcovered = set()
1766 self.scenequeue_notneeded = set()
1767
Brad Bishop08902b02019-08-20 09:16:51 -04001768 # We can't skip specified target tasks which aren't setscene tasks
1769 self.cantskip = set(self.rqdata.target_tids)
1770 self.cantskip.difference_update(self.rqdata.runq_setscene_tids)
1771 self.cantskip.intersection_update(self.rqdata.runtaskentries)
Brad Bishop96ff1982019-08-19 13:50:42 -04001772
1773 schedulers = self.get_schedulers()
1774 for scheduler in schedulers:
1775 if self.scheduler == scheduler.name:
1776 self.sched = scheduler(self, self.rqdata)
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001777 logger.debug("Using runqueue scheduler '%s'", scheduler.name)
Brad Bishop96ff1982019-08-19 13:50:42 -04001778 break
1779 else:
1780 bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
1781 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1782
Brad Bishop08902b02019-08-20 09:16:51 -04001783 #if len(self.rqdata.runq_setscene_tids) > 0:
1784 self.sqdata = SQData()
1785 build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
Brad Bishop96ff1982019-08-19 13:50:42 -04001786
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001787 def runqueue_process_waitpid(self, task, status, fakerootlog=None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001788
1789 # self.build_stamps[task] may not exist when using a shared work directory.
1790 if task in self.build_stamps:
1791 self.build_stamps2.remove(self.build_stamps[task])
1792 del self.build_stamps[task]
1793
Brad Bishop96ff1982019-08-19 13:50:42 -04001794 if task in self.sq_live:
1795 if status != 0:
1796 self.sq_task_fail(task, status)
1797 else:
1798 self.sq_task_complete(task)
1799 self.sq_live.remove(task)
Andrew Geissler5199d832021-09-24 16:47:35 -05001800 self.stats.updateActiveSetscene(len(self.sq_live))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001801 else:
Brad Bishop96ff1982019-08-19 13:50:42 -04001802 if status != 0:
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001803 self.task_fail(task, status, fakerootlog=fakerootlog)
Brad Bishop96ff1982019-08-19 13:50:42 -04001804 else:
1805 self.task_complete(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001806 return True
1807
1808 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001809 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001810 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001811 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1812 self.rq.worker[mc].process.stdin.flush()
1813 except IOError:
1814 # worker must have died?
1815 pass
1816 for mc in self.rq.fakeworker:
1817 try:
1818 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1819 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001820 except IOError:
1821 # worker must have died?
1822 pass
1823
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001824 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001825 self.rq.state = runQueueFailed
1826 return
1827
1828 self.rq.state = runQueueComplete
1829 return
1830
1831 def finish(self):
1832 self.rq.state = runQueueCleanUp
1833
Andrew Geissler5199d832021-09-24 16:47:35 -05001834 active = self.stats.active + len(self.sq_live)
Brad Bishop96ff1982019-08-19 13:50:42 -04001835 if active > 0:
1836 bb.event.fire(runQueueExitWait(active), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001837 self.rq.read_workers()
1838 return self.rq.active_fds()
1839
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001840 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001841 self.rq.state = runQueueFailed
1842 return True
1843
1844 self.rq.state = runQueueComplete
1845 return True
1846
Brad Bishop96ff1982019-08-19 13:50:42 -04001847 # Used by setscene only
1848 def check_dependencies(self, task, taskdeps):
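# Asks the metadata hook named by BB_SETSCENE_DEPVALID (typically
# setscene_depvalid() in OE-Core's sstate.bbclass) whether this setscene
# task still needs the given dependencies, passing (pn, taskname, fn) for
# each of them.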
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001849 if not self.rq.depvalidate:
1850 return False
1851
Brad Bishop08902b02019-08-20 09:16:51 -04001852 # Must not edit parent data
1853 taskdeps = set(taskdeps)
1854
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001855 taskdata = {}
1856 taskdeps.add(task)
1857 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001858 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1859 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001860 taskdata[dep] = [pn, taskname, fn]
1861 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001862 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001863 valid = bb.utils.better_eval(call, locs)
1864 return valid
1865
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001866 def can_start_task(self):
Andrew Geissler5199d832021-09-24 16:47:35 -05001867 active = self.stats.active + len(self.sq_live)
Brad Bishop96ff1982019-08-19 13:50:42 -04001868 can_start = active < self.number_tasks
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001869 return can_start
1870
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001871 def get_schedulers(self):
1872 schedulers = set(obj for obj in globals().values()
1873 if type(obj) is type and
1874 issubclass(obj, RunQueueScheduler))
1875
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001876 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001877 if user_schedulers:
1878 for sched in user_schedulers.split():
1879 if not "." in sched:
1880 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1881 continue
1882
1883 modname, name = sched.rsplit(".", 1)
1884 try:
1885 module = __import__(modname, fromlist=(name,))
1886 except ImportError as exc:
1887 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1888 raise SystemExit(1)
1889 else:
1890 schedulers.add(getattr(module, name))
1891 return schedulers
1892
1893 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001894 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001895 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001896
1897 def task_completeoutright(self, task):
1898 """
1899 Mark a task as completed
1900 Look at the reverse dependencies and mark any task with
1901 completed dependencies as buildable
1902 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001903 self.runq_complete.add(task)
1904 for revdep in self.rqdata.runtaskentries[task].revdeps:
1905 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001906 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001907 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001908 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001909 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001910 for dep in self.rqdata.runtaskentries[revdep].depends:
1911 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001912 alldeps = False
1913 break
1914 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001915 self.setbuildable(revdep)
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001916 logger.debug("Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001917
Andrew Geissler5199d832021-09-24 16:47:35 -05001918 for t in self.sq_deferred.copy():
1919 if self.sq_deferred[t] == task:
1920 logger.debug2("Deferred task %s now buildable" % t)
1921 del self.sq_deferred[t]
1922 update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
1923
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001924 def task_complete(self, task):
1925 self.stats.taskCompleted()
1926 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1927 self.task_completeoutright(task)
Andrew Geissler82c905d2020-04-13 13:39:40 -05001928 self.runq_tasksrun.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001929
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001930 def task_fail(self, task, exitcode, fakerootlog=None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001931 """
1932 Called when a task has failed
1933 Updates the state engine with the failure
1934 """
1935 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001936 self.failed_tids.append(task)
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001937
1938 fakeroot_log = ""
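# Scan the fakeroot (pseudo) log backwards as far as the most recent server
# start marker; it is only attached to the failure event if it mentions a
# mismatch, error or fatal condition.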
1939 if fakerootlog and os.path.exists(fakerootlog):
1940 with open(fakerootlog) as fakeroot_log_file:
1941 fakeroot_failed = False
1942 for line in reversed(fakeroot_log_file.readlines()):
1943 for fakeroot_error in ['mismatch', 'error', 'fatal']:
1944 if fakeroot_error in line.lower():
1945 fakeroot_failed = True
1946 if 'doing new pid setup and server start' in line:
1947 break
1948 fakeroot_log = line + fakeroot_log
1949
1950 if not fakeroot_failed:
1951 fakeroot_log = None
1952
1953 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=fakeroot_log), self.cfgData)
1954
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001955 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001956 self.rq.state = runQueueCleanUp
1957
1958 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001959 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001960 self.setbuildable(task)
1961 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1962 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001963 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001964 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001965
Brad Bishop08902b02019-08-20 09:16:51 -04001966 def summarise_scenequeue_errors(self):
1967 err = False
1968 if not self.sqdone:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06001969 logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
Andrew Geissler5199d832021-09-24 16:47:35 -05001970 completeevent = sceneQueueComplete(self.stats, self.rq)
Brad Bishop08902b02019-08-20 09:16:51 -04001971 bb.event.fire(completeevent, self.cfgData)
1972 if self.sq_deferred:
1973 logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
1974 err = True
1975 if self.updated_taskhash_queue:
1976 logger.error("Scenequeue had unprocessed changed taskhash entries: %s" % pprint.pformat(self.updated_taskhash_queue))
1977 err = True
1978 if self.holdoff_tasks:
1979 logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
1980 err = True
1981
Andrew Geissler95ac1b82021-03-31 14:34:31 -05001982 for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered):
1983 # No task should end up in both covered and uncovered, that is a bug.
1984 logger.error("Setscene task %s in both covered and notcovered." % tid)
1985
Brad Bishop08902b02019-08-20 09:16:51 -04001986 for tid in self.rqdata.runq_setscene_tids:
1987 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
1988 err = True
1989 logger.error("Setscene Task %s was never marked as covered or not covered" % tid)
1990 if tid not in self.sq_buildable:
1991 err = True
1992 logger.error("Setscene Task %s was never marked as buildable" % tid)
1993 if tid not in self.sq_running:
1994 err = True
1995 logger.error("Setscene Task %s was never marked as running" % tid)
1996
1997 for x in self.rqdata.runtaskentries:
1998 if x not in self.tasks_covered and x not in self.tasks_notcovered:
1999 logger.error("Task %s was never moved from the setscene queue" % x)
2000 err = True
2001 if x not in self.tasks_scenequeue_done:
2002 logger.error("Task %s was never processed by the setscene code" % x)
2003 err = True
2004 if len(self.rqdata.runtaskentries[x].depends) == 0 and x not in self.runq_buildable:
2005 logger.error("Task %s was never marked as buildable by the setscene code" % x)
2006 err = True
2007 return err
2008
2009
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002010 def execute(self):
2011 """
Brad Bishop96ff1982019-08-19 13:50:42 -04002012 Run the tasks in a queue prepared by rqdata.prepare()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002013 """
2014
2015 self.rq.read_workers()
Andrew Geissler82c905d2020-04-13 13:39:40 -05002016 if self.updated_taskhash_queue or self.pending_migrations:
2017 self.process_possible_migrations()
2018
2019 if not hasattr(self, "sorted_setscene_tids"):
2020 # Don't want to sort this set every execution
2021 self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002022
Brad Bishop96ff1982019-08-19 13:50:42 -04002023 task = None
2024 if not self.sqdone and self.can_start_task():
2025 # Find the next setscene to run
Andrew Geissler82c905d2020-04-13 13:39:40 -05002026 for nexttask in self.sorted_setscene_tids:
Brad Bishop96ff1982019-08-19 13:50:42 -04002027 if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
2028 if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
2029 if nexttask not in self.rqdata.target_tids:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002030 logger.debug2("Skipping setscene for task %s" % nexttask)
Brad Bishop96ff1982019-08-19 13:50:42 -04002031 self.sq_task_skip(nexttask)
2032 self.scenequeue_notneeded.add(nexttask)
2033 if nexttask in self.sq_deferred:
2034 del self.sq_deferred[nexttask]
2035 return True
Brad Bishop08902b02019-08-20 09:16:51 -04002036 # If covered tasks are running, need to wait for them to complete
2037 for t in self.sqdata.sq_covered_tasks[nexttask]:
2038 if t in self.runq_running and t not in self.runq_complete:
2039 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002040 if nexttask in self.sq_deferred:
2041 if self.sq_deferred[nexttask] not in self.runq_complete:
2042 continue
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002043 logger.debug("Task %s no longer deferred" % nexttask)
Brad Bishop96ff1982019-08-19 13:50:42 -04002044 del self.sq_deferred[nexttask]
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002045 valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
Brad Bishop96ff1982019-08-19 13:50:42 -04002046 if not valid:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002047 logger.debug("%s didn't become valid, skipping setscene" % nexttask)
Brad Bishop96ff1982019-08-19 13:50:42 -04002048 self.sq_task_failoutright(nexttask)
2049 return True
Brad Bishop96ff1982019-08-19 13:50:42 -04002050 if nexttask in self.sqdata.outrightfail:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002051 logger.debug2('No package found, so skipping setscene task %s', nexttask)
Brad Bishop96ff1982019-08-19 13:50:42 -04002052 self.sq_task_failoutright(nexttask)
2053 return True
2054 if nexttask in self.sqdata.unskippable:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002055 logger.debug2("Setscene task %s is unskippable" % nexttask)
Brad Bishop96ff1982019-08-19 13:50:42 -04002056 task = nexttask
2057 break
2058 if task is not None:
2059 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
2060 taskname = taskname + "_setscene"
2061 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002062 logger.debug2('Stamp for underlying task %s is current, so skipping setscene variant', task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002063 self.sq_task_failoutright(task)
2064 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002065
Brad Bishop96ff1982019-08-19 13:50:42 -04002066 if self.cooker.configuration.force:
2067 if task in self.rqdata.target_tids:
2068 self.sq_task_failoutright(task)
2069 return True
2070
2071 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002072 logger.debug2('Setscene stamp current task %s, so skip it and its dependencies', task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002073 self.sq_task_skip(task)
2074 return True
2075
2076 if self.cooker.configuration.skipsetscene:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002077 logger.debug2('No setscene tasks should be executed. Skipping %s', task)
Brad Bishop96ff1982019-08-19 13:50:42 -04002078 self.sq_task_failoutright(task)
2079 return True
2080
Andrew Geissler5199d832021-09-24 16:47:35 -05002081 startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
Brad Bishop96ff1982019-08-19 13:50:42 -04002082 bb.event.fire(startevent, self.cfgData)
2083
2084 taskdepdata = self.sq_build_taskdepdata(task)
2085
2086 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2087 taskhash = self.rqdata.get_task_hash(task)
2088 unihash = self.rqdata.get_task_unihash(task)
2089 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
2090 if not mc in self.rq.fakeworker:
2091 self.rq.start_fakeworker(self, mc)
Andrew Geissler5a43b432020-06-13 10:46:56 -05002092 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
Brad Bishop96ff1982019-08-19 13:50:42 -04002093 self.rq.fakeworker[mc].process.stdin.flush()
2094 else:
Andrew Geissler5a43b432020-06-13 10:46:56 -05002095 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
Brad Bishop96ff1982019-08-19 13:50:42 -04002096 self.rq.worker[mc].process.stdin.flush()
2097
2098 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2099 self.build_stamps2.append(self.build_stamps[task])
2100 self.sq_running.add(task)
2101 self.sq_live.add(task)
Andrew Geissler5199d832021-09-24 16:47:35 -05002102 self.stats.updateActiveSetscene(len(self.sq_live))
Brad Bishop96ff1982019-08-19 13:50:42 -04002103 if self.can_start_task():
2104 return True
2105
Brad Bishopc68388fc2019-08-26 01:33:31 -04002106 self.update_holdofftasks()
2107
Brad Bishop08902b02019-08-20 09:16:51 -04002108 if not self.sq_live and not self.sqdone and not self.sq_deferred and not self.updated_taskhash_queue and not self.holdoff_tasks:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002109 hashequiv_logger.verbose("Setscene tasks completed")
Brad Bishop96ff1982019-08-19 13:50:42 -04002110
Brad Bishop08902b02019-08-20 09:16:51 -04002111 err = self.summarise_scenequeue_errors()
Brad Bishop96ff1982019-08-19 13:50:42 -04002112 if err:
2113 self.rq.state = runQueueFailed
2114 return True
2115
2116 if self.cooker.configuration.setsceneonly:
2117 self.rq.state = runQueueComplete
2118 return True
2119 self.sqdone = True
2120
2121 if self.stats.total == 0:
2122 # nothing to do
2123 self.rq.state = runQueueComplete
2124 return True
2125
2126 if self.cooker.configuration.setsceneonly:
2127 task = None
2128 else:
2129 task = self.sched.next()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002130 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002131 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002132
Brad Bishop96ff1982019-08-19 13:50:42 -04002133 if self.rqdata.setscenewhitelist is not None:
2134 if self.check_setscenewhitelist(task):
2135 self.task_fail(task, "setscene whitelist")
2136 return True
2137
2138 if task in self.tasks_covered:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002139 logger.debug2("Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002140 self.task_skip(task, "covered")
2141 return True
2142
2143 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002144 logger.debug2("Stamp current task %s", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002145
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002146 self.task_skip(task, "existing")
Andrew Geissler82c905d2020-04-13 13:39:40 -05002147 self.runq_tasksrun.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002148 return True
2149
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002150 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002151 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2152 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2153 noexec=True)
2154 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002155 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002156 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002157 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002158 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002159 self.task_complete(task)
2160 return True
2161 else:
2162 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2163 bb.event.fire(startevent, self.cfgData)
2164
2165 taskdepdata = self.build_taskdepdata(task)
2166
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002167 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002168 taskhash = self.rqdata.get_task_hash(task)
2169 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002170 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002171 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002172 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002173 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002174 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002175 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002176 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002177 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002178 return True
Andrew Geissler5a43b432020-06-13 10:46:56 -05002179 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002180 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002181 else:
Andrew Geissler5a43b432020-06-13 10:46:56 -05002182 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collections[mc].get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002183 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002184
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002185 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2186 self.build_stamps2.append(self.build_stamps[task])
2187 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002188 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002189 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002190 return True
2191
Andrew Geissler5199d832021-09-24 16:47:35 -05002192 if self.stats.active > 0 or len(self.sq_live) > 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002193 self.rq.read_workers()
2194 return self.rq.active_fds()
2195
Brad Bishop96ff1982019-08-19 13:50:42 -04002196 # No more tasks can be run. If we have deferred setscene tasks we should run them.
2197 if self.sq_deferred:
2198 tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
2199 logger.warning("Runqueue deadlocked on deferred tasks, forcing task %s" % tid)
Andrew Geissler5199d832021-09-24 16:47:35 -05002200 if tid not in self.runq_complete:
2201 self.sq_task_failoutright(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002202 return True
2203
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002204 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002205 self.rq.state = runQueueFailed
2206 return True
2207
2208 # Sanity Checks
Brad Bishop08902b02019-08-20 09:16:51 -04002209 err = self.summarise_scenequeue_errors()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002210 for task in self.rqdata.runtaskentries:
2211 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002212 logger.error("Task %s never buildable!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002213 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002214 elif task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002215 logger.error("Task %s never ran!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002216 err = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002217 elif task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002218 logger.error("Task %s never completed!", task)
Brad Bishop08902b02019-08-20 09:16:51 -04002219 err = True
2220
2221 if err:
2222 self.rq.state = runQueueFailed
2223 else:
2224 self.rq.state = runQueueComplete
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002225
2226 return True
2227
Brad Bishopc68388fc2019-08-26 01:33:31 -04002228 def filtermcdeps(self, task, mc, deps):
Andrew Geissler99467da2019-02-25 18:54:23 -06002229 ret = set()
Andrew Geissler99467da2019-02-25 18:54:23 -06002230 for dep in deps:
Brad Bishopc68388fc2019-08-26 01:33:31 -04002231 thismc = mc_from_tid(dep)
2232 if thismc != mc:
Andrew Geissler99467da2019-02-25 18:54:23 -06002233 continue
2234 ret.add(dep)
2235 return ret
2236
Brad Bishopa34c0302019-09-23 22:34:48 -04002237    # We filter out multiconfig dependencies from the taskdepdata we pass to the tasks
Andrew Geissler99467da2019-02-25 18:54:23 -06002238 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002239 def build_taskdepdata(self, task):
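        # Builds the taskdepdata structure handed to the worker for this task:
        # a dict mapping each transitive dependency tid to
        # [pn, taskname, fn, deps, provides, taskhash, unihash], restricted to
        # dependencies within the same multiconfig as the task itself.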
2240 taskdepdata = {}
Brad Bishopc68388fc2019-08-26 01:33:31 -04002241 mc = mc_from_tid(task)
Brad Bishop08902b02019-08-20 09:16:51 -04002242 next = self.rqdata.runtaskentries[task].depends.copy()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002243 next.add(task)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002244 next = self.filtermcdeps(task, mc, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002245 while next:
2246 additional = []
2247 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002248 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2249 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2250 deps = self.rqdata.runtaskentries[revdep].depends
2251 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002252 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002253 unihash = self.rqdata.runtaskentries[revdep].unihash
Brad Bishopc68388fc2019-08-26 01:33:31 -04002254 deps = self.filtermcdeps(task, mc, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002255 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002256 for revdep2 in deps:
2257 if revdep2 not in taskdepdata:
2258 additional.append(revdep2)
2259 next = additional
2260
2261 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2262 return taskdepdata
2263
Brad Bishop08902b02019-08-20 09:16:51 -04002264 def update_holdofftasks(self):
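        # Recompute which tasks need to be held back from execution: derive the
        # covered/notcovered sets from the scenequeue results so far, mark newly
        # runnable tasks as buildable, and hold off any setscene task (plus the
        # tasks it covers) whose coverage status is still undecided.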
Brad Bishopc68388fc2019-08-26 01:33:31 -04002265
2266 if not self.holdoff_need_update:
2267 return
2268
2269 notcovered = set(self.scenequeue_notcovered)
2270 notcovered |= self.cantskip
2271 for tid in self.scenequeue_notcovered:
2272 notcovered |= self.sqdata.sq_covered_tasks[tid]
2273 notcovered |= self.sqdata.unskippable.difference(self.rqdata.runq_setscene_tids)
2274 notcovered.intersection_update(self.tasks_scenequeue_done)
2275
2276 covered = set(self.scenequeue_covered)
2277 for tid in self.scenequeue_covered:
2278 covered |= self.sqdata.sq_covered_tasks[tid]
2279 covered.difference_update(notcovered)
2280 covered.intersection_update(self.tasks_scenequeue_done)
2281
2282 for tid in notcovered | covered:
2283 if len(self.rqdata.runtaskentries[tid].depends) == 0:
2284 self.setbuildable(tid)
2285 elif self.rqdata.runtaskentries[tid].depends.issubset(self.runq_complete):
2286 self.setbuildable(tid)
2287
2288 self.tasks_covered = covered
2289 self.tasks_notcovered = notcovered
2290
Brad Bishop08902b02019-08-20 09:16:51 -04002291 self.holdoff_tasks = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002292
Brad Bishop08902b02019-08-20 09:16:51 -04002293 for tid in self.rqdata.runq_setscene_tids:
2294 if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
2295 self.holdoff_tasks.add(tid)
2296
2297 for tid in self.holdoff_tasks.copy():
2298 for dep in self.sqdata.sq_covered_tasks[tid]:
2299 if dep not in self.runq_complete:
2300 self.holdoff_tasks.add(dep)
2301
Brad Bishopc68388fc2019-08-26 01:33:31 -04002302 self.holdoff_need_update = False
2303
Brad Bishop08902b02019-08-20 09:16:51 -04002304 def process_possible_migrations(self):
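        # Apply unihash updates reported by the workers: rehash everything that
        # depends on the changed tasks in dependency order, push the new hashes
        # to the workers, and re-queue affected setscene tasks so their sstate
        # validity is re-evaluated.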
2305
2306 changed = set()
Andrew Geissler82c905d2020-04-13 13:39:40 -05002307 toprocess = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002308 for tid, unihash in self.updated_taskhash_queue.copy():
2309 if tid in self.runq_running and tid not in self.runq_complete:
2310 continue
2311
2312 self.updated_taskhash_queue.remove((tid, unihash))
2313
2314 if unihash != self.rqdata.runtaskentries[tid].unihash:
Andrew Geisslerc926e172021-05-07 16:11:35 -05002315                # Make sure we also rehash any other tasks with the same task hash that are deferred against this one.
2316 torehash = [tid]
2317 for deftid in self.sq_deferred:
2318 if self.sq_deferred[deftid] == tid:
2319 torehash.append(deftid)
2320 for hashtid in torehash:
2321 hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash))
2322 self.rqdata.runtaskentries[hashtid].unihash = unihash
2323 bb.parse.siggen.set_unihash(hashtid, unihash)
2324 toprocess.add(hashtid)
Brad Bishop08902b02019-08-20 09:16:51 -04002325
Andrew Geissler82c905d2020-04-13 13:39:40 -05002326 # Work out all tasks which depend upon these
2327 total = set()
2328 next = set()
2329 for p in toprocess:
2330 next |= self.rqdata.runtaskentries[p].revdeps
2331 while next:
2332 current = next.copy()
2333 total = total | next
2334 next = set()
2335 for ntid in current:
2336 next |= self.rqdata.runtaskentries[ntid].revdeps
2337 next.difference_update(total)
Brad Bishop08902b02019-08-20 09:16:51 -04002338
Andrew Geissler82c905d2020-04-13 13:39:40 -05002339 # Now iterate those tasks in dependency order to regenerate their taskhash/unihash
2340 next = set()
2341 for p in total:
2342 if len(self.rqdata.runtaskentries[p].depends) == 0:
2343 next.add(p)
2344 elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
2345 next.add(p)
2346
2347 # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
2348 while next:
2349 current = next.copy()
2350 next = set()
2351 for tid in current:
2352                     if len(self.rqdata.runtaskentries[tid].depends) and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
2353 continue
2354 orighash = self.rqdata.runtaskentries[tid].hash
Andrew Geissler5a43b432020-06-13 10:46:56 -05002355 dc = bb.parse.siggen.get_data_caches(self.rqdata.dataCaches, mc_from_tid(tid))
2356 newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, dc)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002357 origuni = self.rqdata.runtaskentries[tid].unihash
2358 newuni = bb.parse.siggen.get_unihash(tid)
2359 # FIXME, need to check it can come from sstate at all for determinism?
2360 remapped = False
2361 if newuni == origuni:
2362 # Nothing to do, we match, skip code below
2363 remapped = True
2364 elif tid in self.scenequeue_covered or tid in self.sq_live:
2365                         # Already ran this setscene task or it is running. Report the new taskhash
2366 bb.parse.siggen.report_unihash_equiv(tid, newhash, origuni, newuni, self.rqdata.dataCaches)
2367 hashequiv_logger.verbose("Already covered setscene for %s so ignoring rehash (remap)" % (tid))
2368 remapped = True
2369
2370 if not remapped:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002371 #logger.debug("Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
Andrew Geissler82c905d2020-04-13 13:39:40 -05002372 self.rqdata.runtaskentries[tid].hash = newhash
2373 self.rqdata.runtaskentries[tid].unihash = newuni
2374 changed.add(tid)
2375
2376 next |= self.rqdata.runtaskentries[tid].revdeps
2377 total.remove(tid)
2378 next.intersection_update(total)
Brad Bishop08902b02019-08-20 09:16:51 -04002379
2380 if changed:
2381 for mc in self.rq.worker:
2382 self.rq.worker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2383 for mc in self.rq.fakeworker:
2384 self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
2385
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002386 hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed)))
Brad Bishop08902b02019-08-20 09:16:51 -04002387
2388 for tid in changed:
2389 if tid not in self.rqdata.runq_setscene_tids:
2390 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002391 if tid not in self.pending_migrations:
2392 self.pending_migrations.add(tid)
2393
Andrew Geissler82c905d2020-04-13 13:39:40 -05002394 update_tasks = []
Brad Bishop08902b02019-08-20 09:16:51 -04002395 for tid in self.pending_migrations.copy():
Andrew Geissler82c905d2020-04-13 13:39:40 -05002396 if tid in self.runq_running or tid in self.sq_live:
Brad Bishop6dbb3162019-11-25 09:41:34 -05002397 # Too late, task already running, not much we can do now
2398 self.pending_migrations.remove(tid)
2399 continue
2400
Brad Bishop08902b02019-08-20 09:16:51 -04002401 valid = True
2402 # Check no tasks this covers are running
2403 for dep in self.sqdata.sq_covered_tasks[tid]:
2404 if dep in self.runq_running and dep not in self.runq_complete:
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002405 hashequiv_logger.debug2("Task %s is running which blocks setscene for %s from running" % (dep, tid))
Brad Bishop08902b02019-08-20 09:16:51 -04002406 valid = False
2407 break
2408 if not valid:
2409 continue
2410
2411 self.pending_migrations.remove(tid)
Brad Bishopc68388fc2019-08-26 01:33:31 -04002412 changed = True
Brad Bishop08902b02019-08-20 09:16:51 -04002413
2414 if tid in self.tasks_scenequeue_done:
2415 self.tasks_scenequeue_done.remove(tid)
2416 for dep in self.sqdata.sq_covered_tasks[tid]:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002417 if dep in self.runq_complete and dep not in self.runq_tasksrun:
2418 bb.error("Task %s marked as completed but now needing to rerun? Aborting build." % dep)
2419 self.failed_tids.append(tid)
2420 self.rq.state = runQueueCleanUp
2421 return
2422
Brad Bishop08902b02019-08-20 09:16:51 -04002423 if dep not in self.runq_complete:
2424 if dep in self.tasks_scenequeue_done and dep not in self.sqdata.unskippable:
2425 self.tasks_scenequeue_done.remove(dep)
2426
2427 if tid in self.sq_buildable:
2428 self.sq_buildable.remove(tid)
2429 if tid in self.sq_running:
2430 self.sq_running.remove(tid)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002431 harddepfail = False
2432 for t in self.sqdata.sq_harddeps:
2433 if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
2434 harddepfail = True
2435 break
2436 if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
Brad Bishop08902b02019-08-20 09:16:51 -04002437 if tid not in self.sq_buildable:
2438 self.sq_buildable.add(tid)
2439 if len(self.sqdata.sq_revdeps[tid]) == 0:
2440 self.sq_buildable.add(tid)
2441
2442 if tid in self.sqdata.outrightfail:
2443 self.sqdata.outrightfail.remove(tid)
2444 if tid in self.scenequeue_notcovered:
2445 self.scenequeue_notcovered.remove(tid)
2446 if tid in self.scenequeue_covered:
2447 self.scenequeue_covered.remove(tid)
2448 if tid in self.scenequeue_notneeded:
2449 self.scenequeue_notneeded.remove(tid)
2450
2451 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2452 self.sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
2453
2454 if tid in self.stampcache:
2455 del self.stampcache[tid]
2456
2457 if tid in self.build_stamps:
2458 del self.build_stamps[tid]
2459
Andrew Geissler82c905d2020-04-13 13:39:40 -05002460 update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))
2461
2462 if update_tasks:
Brad Bishop08902b02019-08-20 09:16:51 -04002463 self.sqdone = False
Patrick Williams213cb262021-08-07 19:21:33 -05002464 for tid in [t[0] for t in update_tasks]:
2465 h = pending_hash_index(tid, self.rqdata)
2466 if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
2467 self.sq_deferred[tid] = self.sqdata.hashes[h]
2468 bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
Andrew Geissler82c905d2020-04-13 13:39:40 -05002469 update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
2470
2471 for (tid, harddepfail, origvalid) in update_tasks:
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002472 if tid in self.sqdata.valid and not origvalid:
Andrew Geissler82c905d2020-04-13 13:39:40 -05002473 hashequiv_logger.verbose("Setscene task %s became valid" % tid)
2474 if harddepfail:
2475 self.sq_task_failoutright(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002476
2477 if changed:
Andrew Geissler5199d832021-09-24 16:47:35 -05002478 self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
Brad Bishopc68388fc2019-08-26 01:33:31 -04002479 self.holdoff_need_update = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002480
Brad Bishop96ff1982019-08-19 13:50:42 -04002481 def scenequeue_updatecounters(self, task, fail=False):
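        # Propagate the result of a finished setscene task: fail hard-dependent
        # setscene tasks where needed, mark dependent setscene tasks buildable
        # once all of their reverse dependencies are decided, and walk down the
        # dependency chain marking non-setscene tasks whose reverse dependencies
        # are all done as 'scenequeue done' too.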
Brad Bishop08902b02019-08-20 09:16:51 -04002482
2483 for dep in sorted(self.sqdata.sq_deps[task]):
Brad Bishop96ff1982019-08-19 13:50:42 -04002484 if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002485 if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered:
2486                    # dependency may already have been processed, e.g. a noexec setscene task
2487 continue
Andrew Geissler3b8a17c2021-04-15 15:55:55 -05002488 noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache)
2489 if noexec or stamppresent:
2490 continue
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002491 logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Brad Bishop96ff1982019-08-19 13:50:42 -04002492 self.sq_task_failoutright(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002493 continue
Brad Bishop08902b02019-08-20 09:16:51 -04002494 if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
2495 if dep not in self.sq_buildable:
2496 self.sq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002497
Brad Bishop96ff1982019-08-19 13:50:42 -04002498 next = set([task])
2499 while next:
2500 new = set()
Brad Bishop08902b02019-08-20 09:16:51 -04002501 for t in sorted(next):
Brad Bishop96ff1982019-08-19 13:50:42 -04002502 self.tasks_scenequeue_done.add(t)
2503                # Look down the dependency chain for non-setscene tasks which this task depends on
2504 # and mark as 'done'
2505 for dep in self.rqdata.runtaskentries[t].depends:
2506 if dep in self.rqdata.runq_setscene_tids or dep in self.tasks_scenequeue_done:
2507 continue
2508 if self.rqdata.runtaskentries[dep].revdeps.issubset(self.tasks_scenequeue_done):
2509 new.add(dep)
Brad Bishop96ff1982019-08-19 13:50:42 -04002510 next = new
2511
Andrew Geissler5199d832021-09-24 16:47:35 -05002512 self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
Brad Bishopc68388fc2019-08-26 01:33:31 -04002513 self.holdoff_need_update = True
Brad Bishop96ff1982019-08-19 13:50:42 -04002514
2515 def sq_task_completeoutright(self, task):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002516 """
2517 Mark a task as completed
2518 Look at the reverse dependencies and mark any task with
2519 completed dependencies as buildable
2520 """
2521
Andrew Geisslerd1e89492021-02-12 15:35:20 -06002522 logger.debug('Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002523 self.scenequeue_covered.add(task)
2524 self.scenequeue_updatecounters(task)
2525
Brad Bishop96ff1982019-08-19 13:50:42 -04002526 def sq_check_taskfail(self, task):
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002527 if self.rqdata.setscenewhitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002528 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002529 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2530 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002531 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2532 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2533 self.rq.state = runQueueCleanUp
2534
Brad Bishop96ff1982019-08-19 13:50:42 -04002535 def sq_task_complete(self, task):
Andrew Geissler5199d832021-09-24 16:47:35 -05002536 bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
Brad Bishop96ff1982019-08-19 13:50:42 -04002537 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002538
Brad Bishop96ff1982019-08-19 13:50:42 -04002539 def sq_task_fail(self, task, result):
Andrew Geissler5199d832021-09-24 16:47:35 -05002540 bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002541 self.scenequeue_notcovered.add(task)
2542 self.scenequeue_updatecounters(task, True)
Brad Bishop96ff1982019-08-19 13:50:42 -04002543 self.sq_check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002544
Brad Bishop96ff1982019-08-19 13:50:42 -04002545 def sq_task_failoutright(self, task):
2546 self.sq_running.add(task)
2547 self.sq_buildable.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002548 self.scenequeue_notcovered.add(task)
2549 self.scenequeue_updatecounters(task, True)
2550
Brad Bishop96ff1982019-08-19 13:50:42 -04002551 def sq_task_skip(self, task):
2552 self.sq_running.add(task)
2553 self.sq_buildable.add(task)
2554 self.sq_task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002555
Brad Bishop96ff1982019-08-19 13:50:42 -04002556 def sq_build_taskdepdata(self, task):
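        # Setscene variant of build_taskdepdata(): dependencies come from the
        # _setscene task's [depends] entries rather than the runqueue graph, but
        # each entry keeps the same [pn, taskname, fn, deps, provides, taskhash,
        # unihash] layout.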
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002557 def getsetscenedeps(tid):
2558 deps = set()
2559 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2560 realtid = tid + "_setscene"
2561 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2562 for (depname, idependtask) in idepends:
2563 if depname not in self.rqdata.taskData[mc].build_targets:
2564 continue
2565
2566 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2567 if depfn is None:
2568 continue
2569 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2570 deps.add(deptid)
2571 return deps
2572
2573 taskdepdata = {}
2574 next = getsetscenedeps(task)
2575 next.add(task)
2576 while next:
2577 additional = []
2578 for revdep in next:
2579 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2580 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2581 deps = getsetscenedeps(revdep)
2582 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2583 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002584 unihash = self.rqdata.runtaskentries[revdep].unihash
2585 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002586 for revdep2 in deps:
2587 if revdep2 not in taskdepdata:
2588 additional.append(revdep2)
2589 next = additional
2590
2591 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2592 return taskdepdata
2593
Brad Bishop96ff1982019-08-19 13:50:42 -04002594 def check_setscenewhitelist(self, tid):
2595 # Check task that is going to run against the whitelist
2596 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2597 # Ignore covered tasks
2598 if tid in self.tasks_covered:
2599 return False
2600 # Ignore stamped tasks
2601 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
2602 return False
2603 # Ignore noexec tasks
2604 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2605 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2606 return False
2607
2608 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2609 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2610 if tid in self.rqdata.runq_setscene_tids:
2611 msg = 'Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname)
2612 else:
2613 msg = 'Task %s.%s attempted to execute unexpectedly' % (pn, taskname)
Andrew Geissler82c905d2020-04-13 13:39:40 -05002614 for t in self.scenequeue_notcovered:
2615 msg = msg + "\nTask %s, unihash %s, taskhash %s" % (t, self.rqdata.runtaskentries[t].unihash, self.rqdata.runtaskentries[t].hash)
Brad Bishop96ff1982019-08-19 13:50:42 -04002616 logger.error(msg + '\nThis is usually due to missing setscene tasks. Those missing in this build were: %s' % pprint.pformat(self.scenequeue_notcovered))
2617 return True
2618 return False
2619
2620class SQData(object):
2621 def __init__(self):
2622 # SceneQueue dependencies
2623 self.sq_deps = {}
2624 # SceneQueue reverse dependencies
2625 self.sq_revdeps = {}
Brad Bishop96ff1982019-08-19 13:50:42 -04002626 # Injected inter-setscene task dependencies
2627 self.sq_harddeps = {}
2628 # Cache of stamp files so duplicates can't run in parallel
2629 self.stamps = {}
2630 # Setscene tasks directly depended upon by the build
2631 self.unskippable = set()
2632 # List of setscene tasks which aren't present
2633 self.outrightfail = set()
2634 # A list of normal tasks a setscene task covers
2635 self.sq_covered_tasks = {}
2636
2637def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
2638
2639 sq_revdeps = {}
2640 sq_revdeps_squash = {}
2641 sq_collated_deps = {}
2642
2643 # We need to construct a dependency graph for the setscene functions. Intermediate
2644 # dependencies between the setscene tasks only complicate the code. This code
2645 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2646 # only containing the setscene functions.
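    # As a purely illustrative (hypothetical) example: for a chain
    #   a:do_fetch -> a:do_unpack -> a:do_populate_sysroot -> b:do_compile -> b:do_package
    # where do_populate_sysroot and do_package have setscene variants, the
    # collapsed graph records b:do_package as the setscene reverse dependency of
    # a:do_populate_sysroot, while the plain tasks in between are recorded in
    # sq_covered_tasks as being covered by those two setscene tasks.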
2647
2648 rqdata.init_progress_reporter.next_stage()
2649
2650 # First process the chains up to the first setscene task.
2651 endpoints = {}
2652 for tid in rqdata.runtaskentries:
2653 sq_revdeps[tid] = copy.copy(rqdata.runtaskentries[tid].revdeps)
2654 sq_revdeps_squash[tid] = set()
2655 if (len(sq_revdeps[tid]) == 0) and tid not in rqdata.runq_setscene_tids:
2656 #bb.warn("Added endpoint %s" % (tid))
2657 endpoints[tid] = set()
2658
2659 rqdata.init_progress_reporter.next_stage()
2660
2661 # Secondly process the chains between setscene tasks.
2662 for tid in rqdata.runq_setscene_tids:
2663 sq_collated_deps[tid] = set()
2664 #bb.warn("Added endpoint 2 %s" % (tid))
2665 for dep in rqdata.runtaskentries[tid].depends:
2666 if tid in sq_revdeps[dep]:
2667 sq_revdeps[dep].remove(tid)
2668 if dep not in endpoints:
2669 endpoints[dep] = set()
2670 #bb.warn(" Added endpoint 3 %s" % (dep))
2671 endpoints[dep].add(tid)
2672
2673 rqdata.init_progress_reporter.next_stage()
2674
2675 def process_endpoints(endpoints):
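        # Walk backwards from each endpoint along task dependencies, carrying the
        # set of setscene tasks seen so far: plain tasks get recorded as covered
        # by those setscene tasks, while a setscene task absorbs the accumulated
        # set as its squashed reverse dependencies and stops that walk (its own
        # dependencies were already seeded as endpoints above).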
2676 newendpoints = {}
2677 for point, task in endpoints.items():
2678 tasks = set()
2679 if task:
2680 tasks |= task
2681 if sq_revdeps_squash[point]:
2682 tasks |= sq_revdeps_squash[point]
2683 if point not in rqdata.runq_setscene_tids:
2684 for t in tasks:
2685 sq_collated_deps[t].add(point)
2686 sq_revdeps_squash[point] = set()
2687 if point in rqdata.runq_setscene_tids:
2688 sq_revdeps_squash[point] = tasks
2689 tasks = set()
2690 continue
2691 for dep in rqdata.runtaskentries[point].depends:
2692 if point in sq_revdeps[dep]:
2693 sq_revdeps[dep].remove(point)
2694 if tasks:
2695 sq_revdeps_squash[dep] |= tasks
2696 if len(sq_revdeps[dep]) == 0 and dep not in rqdata.runq_setscene_tids:
2697 newendpoints[dep] = task
2698 if len(newendpoints) != 0:
2699 process_endpoints(newendpoints)
2700
2701 process_endpoints(endpoints)
2702
2703 rqdata.init_progress_reporter.next_stage()
2704
Brad Bishop08902b02019-08-20 09:16:51 -04002705 # Build a list of tasks which are "unskippable"
2706    # These are direct endpoints referenced by the build, up to and including setscene tasks
Brad Bishop96ff1982019-08-19 13:50:42 -04002707 # Take the build endpoints (no revdeps) and find the sstate tasks they depend upon
2708 new = True
2709 for tid in rqdata.runtaskentries:
2710 if len(rqdata.runtaskentries[tid].revdeps) == 0:
2711 sqdata.unskippable.add(tid)
Brad Bishop08902b02019-08-20 09:16:51 -04002712 sqdata.unskippable |= sqrq.cantskip
Brad Bishop96ff1982019-08-19 13:50:42 -04002713 while new:
2714 new = False
Brad Bishop08902b02019-08-20 09:16:51 -04002715 orig = sqdata.unskippable.copy()
2716 for tid in sorted(orig, reverse=True):
Brad Bishop96ff1982019-08-19 13:50:42 -04002717 if tid in rqdata.runq_setscene_tids:
2718 continue
Brad Bishop96ff1982019-08-19 13:50:42 -04002719 if len(rqdata.runtaskentries[tid].depends) == 0:
2720                # These are tasks which have no setscene tasks in their chain and need to be marked as directly buildable
Brad Bishop96ff1982019-08-19 13:50:42 -04002721 sqrq.setbuildable(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002722 sqdata.unskippable |= rqdata.runtaskentries[tid].depends
Brad Bishop08902b02019-08-20 09:16:51 -04002723 if sqdata.unskippable != orig:
2724 new = True
2725
2726 sqrq.tasks_scenequeue_done |= sqdata.unskippable.difference(rqdata.runq_setscene_tids)
Brad Bishop96ff1982019-08-19 13:50:42 -04002727
2728 rqdata.init_progress_reporter.next_stage(len(rqdata.runtaskentries))
2729
2730 # Sanity check all dependencies could be changed to setscene task references
2731 for taskcounter, tid in enumerate(rqdata.runtaskentries):
2732 if tid in rqdata.runq_setscene_tids:
2733 pass
2734 elif len(sq_revdeps_squash[tid]) != 0:
2735 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
2736 else:
2737 del sq_revdeps_squash[tid]
2738 rqdata.init_progress_reporter.update(taskcounter)
2739
2740 rqdata.init_progress_reporter.next_stage()
2741
2742 # Resolve setscene inter-task dependencies
2743 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2744 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
2745 for tid in rqdata.runq_setscene_tids:
2746 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2747 realtid = tid + "_setscene"
2748 idepends = rqdata.taskData[mc].taskentries[realtid].idepends
2749 sqdata.stamps[tid] = bb.build.stampfile(taskname + "_setscene", rqdata.dataCaches[mc], taskfn, noextra=True)
2750 for (depname, idependtask) in idepends:
2751
2752 if depname not in rqdata.taskData[mc].build_targets:
2753 continue
2754
2755 depfn = rqdata.taskData[mc].build_targets[depname][0]
2756 if depfn is None:
2757 continue
2758 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2759 if deptid not in rqdata.runtaskentries:
2760 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
2761
2762 if not deptid in sqdata.sq_harddeps:
2763 sqdata.sq_harddeps[deptid] = set()
2764 sqdata.sq_harddeps[deptid].add(tid)
2765
2766 sq_revdeps_squash[tid].add(deptid)
2767 # Have to zero this to avoid circular dependencies
2768 sq_revdeps_squash[deptid] = set()
2769
2770 rqdata.init_progress_reporter.next_stage()
2771
2772 for task in sqdata.sq_harddeps:
2773 for dep in sqdata.sq_harddeps[task]:
2774 sq_revdeps_squash[dep].add(task)
2775
2776 rqdata.init_progress_reporter.next_stage()
2777
2778 #for tid in sq_revdeps_squash:
2779 # data = ""
2780 # for dep in sq_revdeps_squash[tid]:
2781 # data = data + "\n %s" % dep
2782 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2783
2784 sqdata.sq_revdeps = sq_revdeps_squash
Brad Bishop96ff1982019-08-19 13:50:42 -04002785 sqdata.sq_covered_tasks = sq_collated_deps
2786
2787 # Build reverse version of revdeps to populate deps structure
2788 for tid in sqdata.sq_revdeps:
2789 sqdata.sq_deps[tid] = set()
2790 for tid in sqdata.sq_revdeps:
2791 for dep in sqdata.sq_revdeps[tid]:
2792 sqdata.sq_deps[dep].add(tid)
2793
2794 rqdata.init_progress_reporter.next_stage()
2795
Brad Bishop00e122a2019-10-05 11:10:57 -04002796 sqdata.multiconfigs = set()
Brad Bishop96ff1982019-08-19 13:50:42 -04002797 for tid in sqdata.sq_revdeps:
Brad Bishop00e122a2019-10-05 11:10:57 -04002798 sqdata.multiconfigs.add(mc_from_tid(tid))
Brad Bishop96ff1982019-08-19 13:50:42 -04002799 if len(sqdata.sq_revdeps[tid]) == 0:
2800 sqrq.sq_buildable.add(tid)
2801
2802 rqdata.init_progress_reporter.finish()
2803
Brad Bishop00e122a2019-10-05 11:10:57 -04002804 sqdata.noexec = set()
2805 sqdata.stamppresent = set()
2806 sqdata.valid = set()
Brad Bishop96ff1982019-08-19 13:50:42 -04002807
Patrick Williams213cb262021-08-07 19:21:33 -05002808 sqdata.hashes = {}
2809 sqrq.sq_deferred = {}
2810 for mc in sorted(sqdata.multiconfigs):
2811 for tid in sorted(sqdata.sq_revdeps):
2812 if mc_from_tid(tid) != mc:
2813 continue
2814 h = pending_hash_index(tid, rqdata)
2815 if h not in sqdata.hashes:
2816 sqdata.hashes[h] = tid
2817 else:
2818 sqrq.sq_deferred[tid] = sqdata.hashes[h]
2819 bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
2820
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002821 update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
Brad Bishop00e122a2019-10-05 11:10:57 -04002822
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002823 # Compute a list of 'stale' sstate tasks where the current hash does not match the one
2824 # in any stamp files. Pass the list out to metadata as an event.
2825 found = {}
2826 for tid in rqdata.runq_setscene_tids:
2827 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2828 stamps = bb.build.find_stale_stamps(taskname, rqdata.dataCaches[mc], taskfn)
2829 if stamps:
2830 if mc not in found:
2831 found[mc] = {}
2832 found[mc][tid] = stamps
2833 for mc in found:
2834 event = bb.event.StaleSetSceneTasks(found[mc])
2835 bb.event.fire(event, cooker.databuilder.mcdata[mc])
2836
Andrew Geissler3b8a17c2021-04-15 15:55:55 -05002837def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False):
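    # Returns a (noexec, stamppresent) tuple for a setscene task: noexec tasks
    # have a setscene stamp written and need no further work, while stamppresent
    # means a current setscene or normal stamp already exists so the setscene
    # task can be skipped.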
2838
2839 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2840
2841 taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
2842
2843 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2844 bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
2845 return True, False
2846
2847 if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
2848 logger.debug2('Setscene stamp current for task %s', tid)
2849 return False, True
2850
2851 if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
2852 logger.debug2('Normal stamp current for task %s', tid)
2853 return False, True
2854
2855 return False, False
2856
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002857def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
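    # Re-evaluate the given setscene tids: classify each as noexec, already
    # stamped, valid in sstate (via rq.validate_hashes), deferred, or outright
    # failed, updating sqdata and the scenequeue state in sqrq accordingly.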
Brad Bishop00e122a2019-10-05 11:10:57 -04002858
2859 tocheck = set()
2860
2861 for tid in sorted(tids):
2862 if tid in sqdata.stamppresent:
2863 sqdata.stamppresent.remove(tid)
2864 if tid in sqdata.valid:
2865 sqdata.valid.remove(tid)
Andrew Geisslerc926e172021-05-07 16:11:35 -05002866 if tid in sqdata.outrightfail:
2867 sqdata.outrightfail.remove(tid)
Brad Bishop00e122a2019-10-05 11:10:57 -04002868
Andrew Geissler3b8a17c2021-04-15 15:55:55 -05002869 noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True)
Brad Bishop00e122a2019-10-05 11:10:57 -04002870
Andrew Geissler3b8a17c2021-04-15 15:55:55 -05002871 if noexec:
Brad Bishop00e122a2019-10-05 11:10:57 -04002872 sqdata.noexec.add(tid)
2873 sqrq.sq_task_skip(tid)
Brad Bishop00e122a2019-10-05 11:10:57 -04002874 continue
2875
Andrew Geissler3b8a17c2021-04-15 15:55:55 -05002876 if stamppresent:
Brad Bishop00e122a2019-10-05 11:10:57 -04002877 sqdata.stamppresent.add(tid)
2878 sqrq.sq_task_skip(tid)
2879 continue
2880
2881 tocheck.add(tid)
2882
Brad Bishop1d80a2e2019-11-15 16:35:03 -05002883 sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)
Brad Bishop00e122a2019-10-05 11:10:57 -04002884
Patrick Williams213cb262021-08-07 19:21:33 -05002885 for tid in tids:
2886 if tid in sqdata.stamppresent:
2887 continue
2888 if tid in sqdata.valid:
2889 continue
2890 if tid in sqdata.noexec:
2891 continue
2892 if tid in sqrq.scenequeue_covered:
2893 continue
2894 if tid in sqrq.scenequeue_notcovered:
2895 continue
2896 if tid in sqrq.sq_deferred:
2897 continue
2898 sqdata.outrightfail.add(tid)
Brad Bishop96ff1982019-08-19 13:50:42 -04002899
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002900class TaskFailure(Exception):
2901 """
2902 Exception raised when a task in a runqueue fails
2903 """
2904 def __init__(self, x):
2905 self.args = x
2906
2907
2908class runQueueExitWait(bb.event.Event):
2909 """
2910 Event when waiting for task processes to exit
2911 """
2912
2913 def __init__(self, remain):
2914 self.remain = remain
2915 self.message = "Waiting for %s active tasks to finish" % remain
2916 bb.event.Event.__init__(self)
2917
2918class runQueueEvent(bb.event.Event):
2919 """
2920 Base runQueue event class
2921 """
2922 def __init__(self, task, stats, rq):
2923 self.taskid = task
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002924 self.taskstring = task
2925 self.taskname = taskname_from_tid(task)
2926 self.taskfile = fn_from_tid(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002927 self.taskhash = rq.rqdata.get_task_hash(task)
2928 self.stats = stats.copy()
2929 bb.event.Event.__init__(self)
2930
2931class sceneQueueEvent(runQueueEvent):
2932 """
2933 Base sceneQueue event class
2934 """
2935 def __init__(self, task, stats, rq, noexec=False):
2936 runQueueEvent.__init__(self, task, stats, rq)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002937 self.taskstring = task + "_setscene"
2938 self.taskname = taskname_from_tid(task) + "_setscene"
2939 self.taskfile = fn_from_tid(task)
2940 self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002941
2942class runQueueTaskStarted(runQueueEvent):
2943 """
2944 Event notifying a task was started
2945 """
2946 def __init__(self, task, stats, rq, noexec=False):
2947 runQueueEvent.__init__(self, task, stats, rq)
2948 self.noexec = noexec
2949
2950class sceneQueueTaskStarted(sceneQueueEvent):
2951 """
2952 Event notifying a setscene task was started
2953 """
2954 def __init__(self, task, stats, rq, noexec=False):
2955 sceneQueueEvent.__init__(self, task, stats, rq)
2956 self.noexec = noexec
2957
2958class runQueueTaskFailed(runQueueEvent):
2959 """
2960 Event notifying a task failed
2961 """
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002962 def __init__(self, task, stats, exitcode, rq, fakeroot_log=None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002963 runQueueEvent.__init__(self, task, stats, rq)
2964 self.exitcode = exitcode
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002965 self.fakeroot_log = fakeroot_log
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002966
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002967 def __str__(self):
Andrew Geissler95ac1b82021-03-31 14:34:31 -05002968 if self.fakeroot_log:
2969 return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log)
2970 else:
2971 return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002972
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002973class sceneQueueTaskFailed(sceneQueueEvent):
2974 """
2975 Event notifying a setscene task failed
2976 """
2977 def __init__(self, task, stats, exitcode, rq):
2978 sceneQueueEvent.__init__(self, task, stats, rq)
2979 self.exitcode = exitcode
2980
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002981 def __str__(self):
2982 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2983
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002984class sceneQueueComplete(sceneQueueEvent):
2985 """
2986 Event when all the sceneQueue tasks are complete
2987 """
2988 def __init__(self, stats, rq):
2989 self.stats = stats.copy()
2990 bb.event.Event.__init__(self)
2991
2992class runQueueTaskCompleted(runQueueEvent):
2993 """
2994 Event notifying a task completed
2995 """
2996
2997class sceneQueueTaskCompleted(sceneQueueEvent):
2998 """
2999 Event notifying a setscene task completed
3000 """
3001
3002class runQueueTaskSkipped(runQueueEvent):
3003 """
3004 Event notifying a task was skipped
3005 """
3006 def __init__(self, task, stats, rq, reason):
3007 runQueueEvent.__init__(self, task, stats, rq)
3008 self.reason = reason
3009
Brad Bishop08902b02019-08-20 09:16:51 -04003010class taskUniHashUpdate(bb.event.Event):
3011 """
3012    Event notifying that a task's unihash has been updated
3013 """
3014 def __init__(self, task, unihash):
3015 self.taskid = task
3016 self.unihash = unihash
3017 bb.event.Event.__init__(self)
3018
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003019class runQueuePipe():
3020 """
3021 Abstraction for a pipe between a worker thread and the server
3022 """
Andrew Geissler95ac1b82021-03-31 14:34:31 -05003023 def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003024 self.input = pipein
3025 if pipeout:
3026 pipeout.close()
3027 bb.utils.nonblockingfd(self.input)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003028 self.queue = b""
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003029 self.d = d
3030 self.rq = rq
3031 self.rqexec = rqexec
Andrew Geissler95ac1b82021-03-31 14:34:31 -05003032 self.fakerootlogs = fakerootlogs
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003033
3034 def setrunqueueexec(self, rqexec):
3035 self.rqexec = rqexec
3036
3037 def read(self):
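        # Drain the worker pipe: payloads are pickled blobs framed by
        # <event>...</event> (fired as bitbake events, with unihash updates
        # queued for the runqueue) and <exitcode>...</exitcode> (task exit
        # statuses passed to runqueue_process_waitpid).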
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003038 for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
3039 for worker in workers.values():
3040 worker.process.poll()
3041 if worker.process.returncode is not None and not self.rq.teardown:
3042 bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
3043 self.rq.finish_runqueue(True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003044
3045 start = len(self.queue)
3046 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003047 self.queue = self.queue + (self.input.read(102400) or b"")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003048 except (OSError, IOError) as e:
3049 if e.errno != errno.EAGAIN:
3050 raise
3051 end = len(self.queue)
3052 found = True
3053 while found and len(self.queue):
3054 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003055 index = self.queue.find(b"</event>")
3056 while index != -1 and self.queue.startswith(b"<event>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003057 try:
3058 event = pickle.loads(self.queue[7:index])
Andrew Geissler475cb722020-07-10 16:00:51 -05003059 except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
3060 if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e):
3061                    # The pickled data could contain "</event>" so search for the next occurrence
3062 # unpickling again, this should be the only way an unpickle error could occur
3063 index = self.queue.find(b"</event>", index + 1)
3064 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003065                bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[7:index]))
3066 bb.event.fire_from_worker(event, self.d)
Brad Bishop08902b02019-08-20 09:16:51 -04003067 if isinstance(event, taskUniHashUpdate):
3068 self.rqexec.updated_taskhash_queue.append((event.taskid, event.unihash))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003069 found = True
3070 self.queue = self.queue[index+8:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003071 index = self.queue.find(b"</event>")
3072 index = self.queue.find(b"</exitcode>")
3073 while index != -1 and self.queue.startswith(b"<exitcode>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003074 try:
3075 task, status = pickle.loads(self.queue[10:index])
Andrew Geissler475cb722020-07-10 16:00:51 -05003076 except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003077                bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[10:index]))
Andrew Geissler95ac1b82021-03-31 14:34:31 -05003078 (_, _, _, taskfn) = split_tid_mcfn(task)
3079 fakerootlog = None
3080 if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs:
3081 fakerootlog = self.fakerootlogs[taskfn]
3082 self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003083 found = True
3084 self.queue = self.queue[index+11:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003085 index = self.queue.find(b"</exitcode>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05003086 return (end > start)
3087
3088 def close(self):
3089 while self.read():
3090 continue
3091 if len(self.queue) > 0:
3092 print("Warning, worker left partial message: %s" % self.queue)
3093 self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003094
Andrew Geisslerc9f78652020-09-18 14:11:35 -05003095def get_setscene_enforce_whitelist(d, targets):
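    # Returns None unless BB_SETSCENE_ENFORCE is set to '1'; otherwise expands
    # any '%:<task>' whitelist entries to one '<target>:<task>' entry per build
    # target and returns the resulting list.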
Brad Bishop6e60e8b2018-02-01 10:27:11 -05003096 if d.getVar('BB_SETSCENE_ENFORCE') != '1':
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003097 return None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05003098 whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003099 outlist = []
3100 for item in whitelist[:]:
3101 if item.startswith('%:'):
Andrew Geisslerc9f78652020-09-18 14:11:35 -05003102 for (mc, target, task, fn) in targets:
3103 outlist.append(target + ':' + item.split(':')[1])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003104 else:
3105 outlist.append(item)
3106 return outlist
3107
3108def check_setscene_enforce_whitelist(pn, taskname, whitelist):
3109 import fnmatch
Brad Bishopd7bf8c12018-02-25 22:55:05 -05003110 if whitelist is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06003111 item = '%s:%s' % (pn, taskname)
3112 for whitelist_item in whitelist:
3113 if fnmatch.fnmatch(item, whitelist_item):
3114 return True
3115 return False
3116 return True