#!/usr/bin/env python
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007  Richard Purdie
#
# SPDX-License-Identifier: GPL-2.0-only
#

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import re
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process
import shlex

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )

def fn_from_tid(tid):
    return tid.rsplit(":", 1)[0]

def taskname_from_tid(tid):
    return tid.rsplit(":", 1)[1]

def mc_from_tid(tid):
    if tid.startswith('mc:'):
        return tid.split(':')[1]
    return ""

def split_tid(tid):
    (mc, fn, taskname, _) = split_tid_mcfn(tid)
    return (mc, fn, taskname)

def split_tid_mcfn(tid):
    if tid.startswith('mc:'):
        elems = tid.split(':')
        mc = elems[1]
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        mcfn = "mc:" + mc + ":" + fn
    else:
        tid = tid.rsplit(":", 1)
        mc = ""
        fn = tid[0]
        taskname = tid[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)

def build_tid(mc, fn, taskname):
    if mc:
        return "mc:" + mc + ":" + fn + ":" + taskname
    return fn + ":" + taskname

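# Illustrative examples (made-up paths and multiconfig name) of the tid formats
# the helpers above handle:
#   split_tid_mcfn("/path/foo.bb:do_compile")
#       -> ("", "/path/foo.bb", "do_compile", "/path/foo.bb")
#   split_tid_mcfn("mc:qemuarm:/path/foo.bb:do_compile")
#       -> ("qemuarm", "/path/foo.bb", "do_compile", "mc:qemuarm:/path/foo.bb")
# build_tid() performs the reverse mapping.
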
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self):
        self.active = self.active - 1
        self.completed = self.completed + 1

    def taskSkipped(self):
        self.active = self.active + 1
        self.skipped = self.skipped + 1

    def taskActive(self):
        self.active = self.active + 1

# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9

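# Sketch of the usual progression (not an exhaustive transition map): a build
# moves from runQueuePrepare through runQueueSceneInit/runQueueSceneRun (setscene
# handling) to runQueueRunInit and runQueueRunning, ending in runQueueComplete,
# with runQueueFailed and runQueueCleanUp used on the error/teardown paths.
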
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        self.prio_map = [self.rqdata.runtaskentries.keys()]

        self.buildable = []
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None

        # Filter out tasks whose maximum number of threads has been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(self.buildable) == 1:
            tid = self.buildable[0]
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in self.buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

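    # The skip_buildable filtering above honours a per-task "number_threads"
    # varflag; e.g. (illustrative) do_fetch[number_threads] = "4" would cap the
    # number of do_fetch tasks executing concurrently.
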
    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.append(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        weights = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            if not weight in weights:
                weights[weight] = []
            weights[weight].append(tid)

        self.prio_map = []
        for weight in sorted(weights):
            for w in weights[weight]:
                self.prio_map.append(w)

        self.prio_map.reverse()

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

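        # Worked example (hypothetical recipes/tasks) of the merge above:
        #   task_lists = {'a.bb': ['do_fetch', 'do_unpack', 'do_compile', 'do_build'],
        #                 'b.bb': ['do_fetch', 'do_compile', 'do_build']}
        # merges to ['do_fetch', 'do_unpack', 'do_compile', 'do_build']; after the
        # reverse() below, do_build ends up most important.
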
        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's tasks that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')

class RunTaskEntry(object):
    def __init__(self):
        self.depends = set()
        self.revdeps = set()
        self.hash = None
        self.unihash = None
        self.task = None
        self.weight = 1

class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()

    def reset(self):
        self.runtaskentries = {}

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in ids:
            nam = os.path.basename(id)
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_hash(self, tid):
        return self.runtaskentries[tid].hash

    def get_task_unihash(self, tid):
        return self.runtaskentries[tid].unihash

    def get_user_idstring(self, tid, task_name_suffix = ""):
        return tid + task_name_suffix

    def get_short_user_idstring(self, task, task_name_suffix = ""):
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight

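    # Small illustration (made-up task chain) of the weighting above: for
    # do_fetch <- do_compile <- do_build where do_build is the endpoint,
    # do_build gets weight 10, do_compile 1 + 10 = 11 and do_fetch 1 + 11 = 12,
    # so tasks that gate the most downstream work end up heaviest.
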
    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = {}
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        found = False
        for mc in self.taskData:
            if len(taskData[mc].taskentries) > 0:
                found = True
                break
        if not found:
            # Nothing to do
            return 0

        self.init_progress_reporter.start()
        self.init_progress_reporter.next_stage()

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        def add_build_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
                    continue
                depdata = taskData[mc].build_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_runtime_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
                    continue
                depdata = taskData[mc].run_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_mc_dependencies(mc, tid):
            mcdeps = taskData[mc].get_mcdepends()
            for dep in mcdeps:
                mcdependency = dep.split(':')
                pn = mcdependency[3]
                frommc = mcdependency[1]
                mcdep = mcdependency[2]
                deptask = mcdependency[4]
                if mc == frommc:
                    fn = taskData[mcdep].build_targets[pn][0]
                    newdep = '%s:%s' % (fn,deptask)
                    taskData[mc].taskentries[tid].tdepends.append(newdep)

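        # As parsed above, mcdepends entries take the form
        # "mc:<from-multiconfig>:<dep-multiconfig>:<pn>:<taskname>", e.g.
        # (illustrative) do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_rootfs".
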
        for mc in taskData:
            for tid in taskData[mc].taskentries:

                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                #runtid = build_tid(mc, fn, taskname)

                #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)

                depends = set()
                task_deps = self.dataCaches[mc].task_deps[taskfn]

                self.runtaskentries[tid] = RunTaskEntry()

                if fn in taskData[mc].failed_fns:
                    continue

                # We add multiconfig dependencies before processing internal task deps (tdepends)
                if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
                    add_mc_dependencies(mc, tid)

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                for t in taskData[mc].taskentries[tid].tdepends:
                    (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
                    depends.add(build_tid(depmc, depfn, deptaskname))

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskname in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskname].split()
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskname].split()
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData[mc].taskentries[tid].idepends
                for (depname, idependtask) in idepends:
                    if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData[mc].build_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
                irdepends = taskData[mc].taskentries[tid].irdepends
                for (depname, idependtask) in irdepends:
                    if depname in taskData[mc].run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        if not taskData[mc].run_targets[depname]:
                            continue
                        depdata = taskData[mc].run_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskname].split()
                    recursivetasks[tid] = tasknames
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
                    if taskname in tasknames:
                        recursivetasksselfref.add(tid)

                if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
                    recursiveitasks[tid] = []
                    for t in task_deps['recideptask'][taskname].split():
                        newdep = build_tid(mc, fn, t)
                        recursiveitasks[tid].append(newdep)

                self.runtaskentries[tid].depends = depends
                # Remove all self references
                self.runtaskentries[tid].depends.discard(tid)

        #self.dump_data()

        self.init_progress_reporter.next_stage()

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed

        # Generating/iterating recursive lists of dependencies is painful and potentially slow
        # Precompute recursive task dependencies here by:
        #     a) create a temp list of reverse dependencies (revdeps)
        #     b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
        #     c) combine the total list of dependencies in cumulativedeps
        #     d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)


        revdeps = {}
        deps = {}
        cumulativedeps = {}
        for tid in self.runtaskentries:
            deps[tid] = set(self.runtaskentries[tid].depends)
            revdeps[tid] = set()
            cumulativedeps[tid] = set()
        # Generate a temp list of reverse dependencies
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                revdeps[dep].add(tid)
        # Find the dependency chain endpoints
        endpoints = set()
        for tid in self.runtaskentries:
            if len(deps[tid]) == 0:
                endpoints.add(tid)
        # Iterate the chains collating dependencies
        while endpoints:
            next = set()
            for tid in endpoints:
                for dep in revdeps[tid]:
                    cumulativedeps[dep].add(fn_from_tid(tid))
                    cumulativedeps[dep].update(cumulativedeps[tid])
                    if tid in deps[dep]:
                        deps[dep].remove(tid)
                    if len(deps[dep]) == 0:
                        next.add(dep)
            endpoints = next
        #for tid in deps:
        #    if len(deps[tid]) != 0:
        #        bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))

        # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
        # resolve these recursively until we aren't adding any further extra dependencies
        extradeps = True
        while extradeps:
            extradeps = 0
            for tid in recursivetasks:
                tasknames = recursivetasks[tid]

                totaldeps = set(self.runtaskentries[tid].depends)
                if tid in recursiveitasks:
                    totaldeps.update(recursiveitasks[tid])
                    for dep in recursiveitasks[tid]:
                        if dep not in self.runtaskentries:
                            continue
                        totaldeps.update(self.runtaskentries[dep].depends)

                deps = set()
                for dep in totaldeps:
                    if dep in cumulativedeps:
                        deps.update(cumulativedeps[dep])

                for t in deps:
                    for taskname in tasknames:
                        newtid = t + ":" + taskname
                        if newtid == tid:
                            continue
                        if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

                # Handle recursive tasks which depend upon other recursive tasks
                deps = set()
                for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
                    deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
                for newtid in deps:
                    for taskname in tasknames:
                        if not newtid.endswith(":" + taskname):
                            continue
                        if newtid in self.runtaskentries:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

            bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)

        # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for tid in recursivetasksselfref:
            self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)

        self.init_progress_reporter.next_stage()

        #self.dump_data()

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_tids = []
        for (mc, target, task, fn) in self.targets:

            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
                continue

            if target in taskData[mc].failed_deps:
                continue

            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            if fn in taskData[mc].failed_fns:
                continue

            # fn already has mc prefix
            tid = fn + ":" + task
            self.target_tids.append(tid)
            if tid not in taskData[mc].taskentries:
                import difflib
                tasks = []
                for x in taskData[mc].taskentries:
                    if x.startswith(fn + ":"):
                        tasks.append(taskname_from_tid(x))
                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n  %s" % "\n  ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))

            # For tasks called "XXXX-", only run their dependencies
            if parents:
                for i in self.runtaskentries[tid].depends:
                    mark_active(i, 1)
            else:
                mark_active(tid, 1)

        self.init_progress_reporter.next_stage()

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        delcount = {}
        for tid in list(self.runtaskentries.keys()):
            if tid not in runq_build:
                delcount[tid] = self.runtaskentries[tid]
                del self.runtaskentries[tid]

        # Handle --runall
        if self.cooker.configuration.runall:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runall:
                runall_tids = set()
                for tid in list(self.runtaskentries):
                    wanttid = fn_from_tid(tid) + ":do_%s" % task
                    if wanttid in delcount:
                        self.runtaskentries[wanttid] = delcount[wanttid]
                    if wanttid in self.runtaskentries:
                        runall_tids.add(wanttid)

                for tid in list(runall_tids):
                    mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))

        self.init_progress_reporter.next_stage()

        # Handle runonly
        if self.cooker.configuration.runonly:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runonly:
                runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }

                for tid in list(runonly_tids):
                    mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))

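        # For reference (illustrative command lines): "bitbake <target> --runall=fetch"
        # pulls every recipe's do_fetch in the target's task graph back into the queue,
        # while "--runonly=fetch" restricts the queue to the matching do_fetch tasks
        # and their dependencies.
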
        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runtaskentries) == 0:
            if not taskData[''].abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))

        logger.verbose("Assign Weightings")

        self.init_progress_reporter.next_stage()

        # Generate a list of reverse dependencies to ease future calculations
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                self.runtaskentries[dep].revdeps.add(tid)

        self.init_progress_reporter.next_stage()

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for tid in self.runtaskentries:
            revdeps = self.runtaskentries[tid].revdeps
            if len(revdeps) == 0:
                endpoints.append(tid)
            for dep in revdeps:
                if dep in self.runtaskentries[tid].depends:
                    bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))


        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        self.init_progress_reporter.next_stage()

        # Calculate task weights
        # Check for longer circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        self.init_progress_reporter.next_stage()

        # Sanity Check - Check for multiple tasks building the same provider
        for mc in self.dataCaches:
            prov_list = {}
            seen_fn = []
            for tid in self.runtaskentries:
                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                if taskfn in seen_fn:
                    continue
                if mc != tidmc:
                    continue
                seen_fn.append(taskfn)
                for prov in self.dataCaches[mc].fn_provides[taskfn]:
                    if prov not in prov_list:
                        prov_list[prov] = [taskfn]
                    elif taskfn not in prov_list[prov]:
                        prov_list[prov].append(taskfn)
            for prov in prov_list:
                if len(prov_list[prov]) < 2:
                    continue
                if prov in self.multi_provider_whitelist:
                    continue
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCaches[mc].pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n  %s" % (prov, "\n  ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for tid in self.runtaskentries:
                        fn = fn_from_tid(tid)
                        if fn != provfn:
                            continue
                        for dep in self.runtaskentries[tid].revdeps:
                            fn = fn_from_tid(dep)
                            if fn == provfn:
                                continue
                            deps.add(dep)
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n  %s" % (provfn, "\n  ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCaches[mc].fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCaches[mc].rproviders:
                        if provfn in self.dataCaches[mc].rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCaches[mc].packages:
                        if provfn in self.dataCaches[mc].packages[package]:
                            rprovides.add(package)
                    for package in self.dataCaches[mc].packages_dynamic:
                        if provfn in self.dataCaches[mc].packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n  %s" % ("\n  ".join(commonprovs))
                #msg += "\nCommon rprovides:\n  %s" % ("\n  ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n  %s" % (provfn, "\n  ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n  %s" % (provfn, "\n  ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.verbnote(msg)
                else:
                    logger.error(msg)

        self.init_progress_reporter.next_stage()

        # Create a whitelist usable by the stamp checks
        self.stampfnwhitelist = {}
        for mc in self.taskData:
            self.stampfnwhitelist[mc] = []
            for entry in self.stampwhitelist.split():
                if entry not in self.taskData[mc].build_targets:
                    continue
                fn = self.taskData[mc].build_targets[entry][0]
                self.stampfnwhitelist[mc].append(fn)

        self.init_progress_reporter.next_stage()

        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene_tids = []
        if not self.cooker.configuration.nosetscene:
            for tid in self.runtaskentries:
                (mc, fn, taskname, _) = split_tid_mcfn(tid)
                setscenetid = tid + "_setscene"
                if setscenetid not in taskData[mc].taskentries:
                    continue
                self.runq_setscene_tids.append(tid)

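        # Each tid collected here has a matching "<taskname>_setscene" variant, e.g.
        # do_populate_sysroot is paired with do_populate_sysroot_setscene, which can
        # restore the task's output from shared state instead of re-executing it
        # (brief summary of the setscene mechanism).
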
        def invalidate_task(tid, error_nostamp):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)

        self.init_progress_reporter.next_stage()

        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for tid in self.target_tids:
                invalidate_task(tid, False)

        # Invalidate task if invalidate mode active
        if self.cooker.configuration.invalidate_stamp:
            for tid in self.target_tids:
                fn = fn_from_tid(tid)
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    if not st.startswith("do_"):
                        st = "do_%s" % st
                    invalidate_task(fn + ":" + st, True)

        self.init_progress_reporter.next_stage()

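        # The invalidate_stamp path corresponds to bitbake's -C/--clear-stamp option;
        # e.g. (illustrative) "bitbake foo -C compile" invalidates foo's do_compile
        # stamp so the build reruns from that task onwards.
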
        # Create and print to the logs a virtual/xxxx -> PN (fn) table
        for mc in taskData:
            virtmap = taskData[mc].get_providermap(prefix="virtual/")
            virtpnmap = {}
            for v in virtmap:
                virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
                bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
            if hasattr(bb.parse.siggen, "tasks_resolved"):
                bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])

        self.init_progress_reporter.next_stage()

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(self.runtaskentries)
        while len(todeal) > 0:
            for tid in todeal.copy():
                if len(self.runtaskentries[tid].depends - dealtwith) == 0:
                    dealtwith.add(tid)
                    todeal.remove(tid)
                    self.prepare_task_hash(tid)

        bb.parse.siggen.writeout_file_checksum_cache()

        #self.dump_data()
        return len(self.runtaskentries)

    def prepare_task_hash(self, tid):
        procdep = []
        for dep in self.runtaskentries[tid].depends:
            procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
        (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
        self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
        self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(taskfn + "." + taskname)

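    # Note on the two hashes set above: 'hash' is the task signature computed from the
    # task's inputs, while 'unihash' is the unified hash reported by the siggen; without
    # a hash equivalence service the two are typically identical (general description,
    # see bb.siggen for the authoritative behaviour).
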
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001172 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001173 """
1174 Dump some debug information on the internal data structures
1175 """
1176 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001177 for tid in self.runtaskentries:
1178 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1179 self.runtaskentries[tid].weight,
1180 self.runtaskentries[tid].depends,
1181 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001182
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001183class RunQueueWorker():
1184 def __init__(self, process, pipe):
1185 self.process = process
1186 self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001187
1188class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001189 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001190
1191 self.cooker = cooker
1192 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001193 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001194
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001195 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1196 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
1197 self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
1198 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001199
1200 self.state = runQueuePrepare
1201
1202 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001203 # Invoked at regular time intervals via the bitbake heartbeat event
1204 # while the build is running. We generate a unique name for the handler
1205 # here, just in case there ever is more than one RunQueue instance,
1206 # start the handler when reaching runQueueSceneRun, and stop it when
1207 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001208 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001209 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1210 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001211 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001212 self.worker = {}
1213 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001214
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001215 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001216 logger.debug(1, "Starting bitbake-worker")
1217 magic = "decafbad"
1218 if self.cooker.configuration.profile:
1219 magic = "decafbadbad"
1220 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001221 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001222 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001223 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001224 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001225 env = os.environ.copy()
1226 for key, value in (var.split('=') for var in fakerootenv):
1227 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001228 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001229 else:
1230 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1231 bb.utils.nonblockingfd(worker.stdout)
1232 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1233
1234 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001235 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1236 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1237 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1238 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001239 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001240 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1241 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1242 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1243 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1244 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001245 "buildname" : self.cfgData.getVar("BUILDNAME"),
1246 "date" : self.cfgData.getVar("DATE"),
1247 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001248 }
1249
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001250 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001251 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001252 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001253 worker.stdin.flush()
1254
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001255 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001256
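    # Sketch of the worker handshake set up above (the magics are the literal
    # values from this file; "cfg" and "wdata" are placeholder names):
    #
    #   worker = subprocess.Popen(["bitbake-worker", "decafbad"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    #   worker.stdin.write(b"<cookerconfig>" + pickle.dumps(cfg) + b"</cookerconfig>")
    #   worker.stdin.write(b"<workerdata>" + pickle.dumps(wdata) + b"</workerdata>")
    #
    # i.e. pickled blobs framed by XML-style tags on the worker's stdin. The
    # magic becomes "decafbadbad" when profiling and gains a "beef" suffix for
    # fakeroot workers, which are additionally launched via FAKEROOTCMD with
    # the FAKEROOTBASEENV environment applied.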
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001257 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001258 if not worker:
1259 return
1260 logger.debug(1, "Teardown for bitbake-worker")
1261 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001262 worker.process.stdin.write(b"<quit></quit>")
1263 worker.process.stdin.flush()
1264 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001265 except IOError:
1266 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001267 while worker.process.returncode is None:
1268 worker.pipe.read()
1269 worker.process.poll()
1270 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001271 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001272 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001273
1274 def start_worker(self):
1275 if self.worker:
1276 self.teardown_workers()
1277 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001278 for mc in self.rqdata.dataCaches:
1279 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001280
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001281 def start_fakeworker(self, rqexec, mc):
1282 if not mc in self.fakeworker:
1283 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001284
1285 def teardown_workers(self):
1286 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001287 for mc in self.worker:
1288 self._teardown_worker(self.worker[mc])
1289 self.worker = {}
1290 for mc in self.fakeworker:
1291 self._teardown_worker(self.fakeworker[mc])
1292 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001293
1294 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001295 for mc in self.worker:
1296 self.worker[mc].pipe.read()
1297 for mc in self.fakeworker:
1298 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001299
1300 def active_fds(self):
1301 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001302 for mc in self.worker:
1303 fds.append(self.worker[mc].pipe.input)
1304 for mc in self.fakeworker:
1305 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001306 return fds
1307
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001308 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001309 def get_timestamp(f):
1310 try:
1311 if not os.access(f, os.F_OK):
1312 return None
1313 return os.stat(f)[stat.ST_MTIME]
1314 except:
1315 return None
1316
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001317 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1318 if taskname is None:
1319 taskname = tn
1320
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001321 if self.stamppolicy == "perfile":
1322 fulldeptree = False
1323 else:
1324 fulldeptree = True
1325 stampwhitelist = []
1326 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001327 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001328
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001329 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001330
1331 # If the stamp is missing, it's not current
1332 if not os.access(stampfile, os.F_OK):
1333 logger.debug(2, "Stampfile %s not available", stampfile)
1334 return False
1335 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001336 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001337 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1338 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1339 return False
1340
1341 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1342 return True
1343
1344 if cache is None:
1345 cache = {}
1346
1347 iscurrent = True
1348 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001349 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001350 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001351 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1352 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1353 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001354 t2 = get_timestamp(stampfile2)
1355 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001356 if t3 and not t2:
1357 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001358 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001359 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001360 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1361 if not t2:
1362 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1363 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001364 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001365 if t1 < t2:
1366 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1367 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001368 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001369 if recurse and iscurrent:
1370 if dep in cache:
1371 iscurrent = cache[dep]
1372 if not iscurrent:
1373 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1374 else:
1375 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1376 cache[dep] = iscurrent
1377 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001378 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001379 return iscurrent
1380
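    # Worked example for the comparison above (hypothetical mtimes): with our
    # stamp at t1=100 and a dependency's stamp at t2=150, t1 < t2 so the task
    # is not current and must re-run. If the dependency also has a _setscene
    # stamp t3 newer than t2 (or t2 is missing entirely), that dependency was
    # restored from setscene and is skipped from the comparison.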
Brad Bishop19323692019-04-05 15:28:33 -04001381 def validate_hash(self, *, sq_fn, sq_task, sq_hash, sq_hashfn, siginfo, sq_unihash, d):
1382 locs = {"sq_fn" : sq_fn, "sq_task" : sq_task, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn,
1383 "sq_unihash" : sq_unihash, "siginfo" : siginfo, "d" : d}
1384
1385 hashvalidate_args = ("(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo, sq_unihash=sq_unihash)",
1386 "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo)",
1387 "(sq_fn, sq_task, sq_hash, sq_hashfn, d)")
1388
1389 for args in hashvalidate_args[:-1]:
1390 try:
1391 call = self.hashvalidate + args
1392 return bb.utils.better_eval(call, locs)
1393 except TypeError:
1394 continue
1395
1396         # Call the last entry without a try/except so that any raised
1397         # TypeError propagates
1398 call = self.hashvalidate + hashvalidate_args[-1]
1399 return bb.utils.better_eval(call, locs)
1400
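    # A BB_HASHCHECK_FUNCTION matching the newest call form above would look
    # roughly like this (sketch only; "have_sstate_for" is a placeholder and
    # the real implementation lives in the metadata):
    #
    #   def my_hash_check(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False, sq_unihash=None):
    #       # return the indices into the parallel sq_* lists that can be
    #       # fulfilled from sstate
    #       return [i for i, h in enumerate(sq_hash) if have_sstate_for(h)]
    #
    # Older signatures without siginfo/sq_unihash keep working via the
    # TypeError fallback loop above.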
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001401 def _execute_runqueue(self):
1402 """
1403 Run the tasks in a queue prepared by rqdata.prepare()
1404 Upon failure, optionally try to recover the build using any alternate providers
1405 (if the abort on failure configuration option isn't set)
1406 """
1407
1408 retval = True
1409
1410 if self.state is runQueuePrepare:
1411 self.rqexe = RunQueueExecuteDummy(self)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001412 # NOTE: if you add, remove or significantly refactor the stages of this
1413 # process then you should recalculate the weightings here. This is quite
1414 # easy to do - just change the next line temporarily to pass debug=True as
1415 # the last parameter and you'll get a printout of the weightings as well
1416 # as a map to the lines where next_stage() was called. Of course this isn't
1417 # critical, but it helps to keep the progress reporting accurate.
1418 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1419 "Initialising tasks",
1420 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001421 if self.rqdata.prepare() == 0:
1422 self.state = runQueueComplete
1423 else:
1424 self.state = runQueueSceneInit
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001425 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001426
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001427 # we are ready to run, emit dependency info to any UI or class which
1428 # needs it
1429 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1430 self.rqdata.init_progress_reporter.next_stage()
1431 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001432
1433 if self.state is runQueueSceneInit:
Brad Bishope2d5b612018-11-23 10:55:50 +13001434 if not self.dm_event_handler_registered:
1435 res = bb.event.register(self.dm_event_handler_name,
1436 lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
1437 ('bb.event.HeartbeatEvent',))
1438 self.dm_event_handler_registered = True
1439
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001440 dump = self.cooker.configuration.dump_signatures
1441 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001442 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001443 if 'printdiff' in dump:
1444 invalidtasks = self.print_diffscenetasks()
1445 self.dump_signatures(dump)
1446 if 'printdiff' in dump:
1447 self.write_diffscenetasks(invalidtasks)
1448 self.state = runQueueComplete
1449 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001450 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001451 self.start_worker()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001452 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001453 self.rqexe = RunQueueExecuteScenequeue(self)
1454
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001455 if self.state is runQueueSceneRun:
1456 retval = self.rqexe.execute()
1457
1458 if self.state is runQueueRunInit:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001459 if self.cooker.configuration.setsceneonly:
1460 self.state = runQueueComplete
1461 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001462 # Just in case we didn't setscene
1463 self.rqdata.init_progress_reporter.finish()
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001464 logger.info("Executing RunQueue Tasks")
1465 self.rqexe = RunQueueExecuteTasks(self)
1466 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001467
1468 if self.state is runQueueRunning:
1469 retval = self.rqexe.execute()
1470
1471 if self.state is runQueueCleanUp:
1472 retval = self.rqexe.finish()
1473
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001474 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1475
1476 if build_done and self.dm_event_handler_registered:
1477 bb.event.remove(self.dm_event_handler_name, None)
1478 self.dm_event_handler_registered = False
1479
1480 if build_done and self.rqexe:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001481 self.teardown_workers()
1482 if self.rqexe.stats.failed:
1483 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1484 else:
1485 # Let's avoid the word "failed" if nothing actually did
1486 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
1487
1488 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001489 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001490
1491 if self.state is runQueueComplete:
1492 # All done
1493 return False
1494
1495 # Loop
1496 return retval
1497
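    # Rough state progression driven by _execute_runqueue() above (some of the
    # intermediate transitions happen inside the executor classes):
    #
    #   runQueuePrepare -> runQueueSceneInit -> runQueueSceneRun
    #       -> runQueueRunInit -> runQueueRunning -> runQueueCleanUp
    #       -> runQueueComplete or runQueueFailed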
1498 def execute_runqueue(self):
1499 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1500 try:
1501 return self._execute_runqueue()
1502 except bb.runqueue.TaskFailure:
1503 raise
1504 except SystemExit:
1505 raise
1506 except bb.BBHandledException:
1507 try:
1508 self.teardown_workers()
1509 except:
1510 pass
1511 self.state = runQueueComplete
1512 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001513 except Exception as err:
1514 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001515 try:
1516 self.teardown_workers()
1517 except:
1518 pass
1519 self.state = runQueueComplete
1520 raise
1521
1522 def finish_runqueue(self, now = False):
1523 if not self.rqexe:
1524 self.state = runQueueComplete
1525 return
1526
1527 if now:
1528 self.rqexe.finish_now()
1529 else:
1530 self.rqexe.finish()
1531
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001532 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001533 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001534 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1535 siggen = bb.parse.siggen
1536 dataCaches = self.rqdata.dataCaches
1537 siggen.dump_sigfn(fn, dataCaches, options)
1538
1539 def dump_signatures(self, options):
1540 fns = set()
1541 bb.note("Reparsing files to collect dependency data")
1542
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001543 for tid in self.rqdata.runtaskentries:
1544 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001545 fns.add(fn)
1546
1547 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1548 # We cannot use the real multiprocessing.Pool easily due to some local data
1549 # that can't be pickled. This is a cheap multi-process solution.
1550 launched = []
1551 while fns:
1552 if len(launched) < max_process:
1553 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1554 p.start()
1555 launched.append(p)
1556 for q in launched:
1557 # The finished processes are joined when calling is_alive()
1558 if not q.is_alive():
1559 launched.remove(q)
1560 for p in launched:
1561 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001562
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001563 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001564
1565 return
1566
1567 def print_diffscenetasks(self):
1568
1569 valid = []
1570 sq_hash = []
1571 sq_hashfn = []
Brad Bishop19323692019-04-05 15:28:33 -04001572 sq_unihash = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001573 sq_fn = []
1574 sq_taskname = []
1575 sq_task = []
1576 noexec = []
1577 stamppresent = []
1578 valid_new = set()
1579
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001580 for tid in self.rqdata.runtaskentries:
1581 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1582 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001583
1584 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001585 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001586 continue
1587
1588 sq_fn.append(fn)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001589 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001590 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
Brad Bishop19323692019-04-05 15:28:33 -04001591 sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001592 sq_taskname.append(taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001593 sq_task.append(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001594
1595 valid = self.validate_hash(sq_fn=sq_fn, sq_task=sq_taskname, sq_hash=sq_hash, sq_hashfn=sq_hashfn,
1596 siginfo=True, sq_unihash=sq_unihash, d=self.cooker.data)
1597
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001598 for v in valid:
1599 valid_new.add(sq_task[v])
1600
1601 # Tasks which are both setscene and noexec never care about dependencies
1602 # We therefore find tasks which are setscene and noexec and mark their
1603 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001604 for tid in noexec:
1605 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001606 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001607 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001608 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001609 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1610 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001611 continue
1612 hasnoexecparents = False
1613 break
1614 if hasnoexecparents:
1615 valid_new.add(dep)
1616
1617 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001618 for tid in self.rqdata.runtaskentries:
1619 if tid not in valid_new and tid not in noexec:
1620 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001621
1622 found = set()
1623 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001624 for tid in invalidtasks:
1625 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001626 while toprocess:
1627 next = set()
1628 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001629 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001630 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001631 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001632 if dep not in processed:
1633 processed.add(dep)
1634 next.add(dep)
1635 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001636 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001637 toprocess = set()
1638
1639 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001640 for tid in invalidtasks.difference(found):
1641 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001642
1643 if tasklist:
1644 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1645
1646 return invalidtasks.difference(found)
1647
1648 def write_diffscenetasks(self, invalidtasks):
1649
1650 # Define recursion callback
1651 def recursecb(key, hash1, hash2):
1652 hashes = [hash1, hash2]
1653 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1654
1655 recout = []
1656 if len(hashfiles) == 2:
1657 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
Brad Bishopc342db32019-05-15 21:57:59 -04001658 recout.extend(list(' ' + l for l in out2))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001659 else:
1660 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1661
1662 return recout
1663
1664
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001665 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001666 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1667 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001668 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001669 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1670 match = None
1671 for m in matches:
1672 if h in m:
1673 match = m
1674 if match is None:
1675                 bb.fatal("Can't find a task we're supposed to have written out (hash: %s)" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001676 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001677 if matches:
1678 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
Brad Bishop19323692019-04-05 15:28:33 -04001679 prevh = __find_sha256__.search(latestmatch).group(0)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001680 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1681 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1682
1683class RunQueueExecute:
1684
1685 def __init__(self, rq):
1686 self.rq = rq
1687 self.cooker = rq.cooker
1688 self.cfgData = rq.cfgData
1689 self.rqdata = rq.rqdata
1690
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001691 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1692 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001693
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001694 self.runq_buildable = set()
1695 self.runq_running = set()
1696 self.runq_complete = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001697
1698 self.build_stamps = {}
1699 self.build_stamps2 = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001700 self.failed_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001701
1702 self.stampcache = {}
1703
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001704 for mc in rq.worker:
1705 rq.worker[mc].pipe.setrunqueueexec(self)
1706 for mc in rq.fakeworker:
1707 rq.fakeworker[mc].pipe.setrunqueueexec(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001708
1709 if self.number_tasks <= 0:
1710 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1711
1712 def runqueue_process_waitpid(self, task, status):
1713
1714         # self.build_stamps[task] may not exist when using a shared work directory.
1715 if task in self.build_stamps:
1716 self.build_stamps2.remove(self.build_stamps[task])
1717 del self.build_stamps[task]
1718
1719 if status != 0:
1720 self.task_fail(task, status)
1721 else:
1722 self.task_complete(task)
1723 return True
1724
1725 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001726 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001727 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001728 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1729 self.rq.worker[mc].process.stdin.flush()
1730 except IOError:
1731 # worker must have died?
1732 pass
1733 for mc in self.rq.fakeworker:
1734 try:
1735 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1736 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001737 except IOError:
1738 # worker must have died?
1739 pass
1740
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001741 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001742 self.rq.state = runQueueFailed
1743 return
1744
1745 self.rq.state = runQueueComplete
1746 return
1747
1748 def finish(self):
1749 self.rq.state = runQueueCleanUp
1750
1751 if self.stats.active > 0:
1752 bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
1753 self.rq.read_workers()
1754 return self.rq.active_fds()
1755
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001756 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001757 self.rq.state = runQueueFailed
1758 return True
1759
1760 self.rq.state = runQueueComplete
1761 return True
1762
1763 def check_dependencies(self, task, taskdeps, setscene = False):
1764 if not self.rq.depvalidate:
1765 return False
1766
1767 taskdata = {}
1768 taskdeps.add(task)
1769 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001770 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1771 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001772 taskdata[dep] = [pn, taskname, fn]
1773 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001774 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001775 valid = bb.utils.better_eval(call, locs)
1776 return valid
1777
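    # A BB_SETSCENE_DEPVALID function matching the call built above would have
    # roughly this shape (sketch; the real one is supplied by the metadata):
    #
    #   def setscene_depvalid(task, taskdata, notneeded, d):
    #       # taskdata maps each dependency tid to [pn, taskname, fn];
    #       # notneeded is the executor's scenequeue_notneeded set. The return
    #       # value is handed back verbatim by check_dependencies().
    #       ...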
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001778 def can_start_task(self):
1779 can_start = self.stats.active < self.number_tasks
1780 return can_start
1781
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001782class RunQueueExecuteDummy(RunQueueExecute):
1783 def __init__(self, rq):
1784 self.rq = rq
1785 self.stats = RunQueueStats(0)
1786
1787 def finish(self):
1788 self.rq.state = runQueueComplete
1789 return
1790
1791class RunQueueExecuteTasks(RunQueueExecute):
1792 def __init__(self, rq):
1793 RunQueueExecute.__init__(self, rq)
1794
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001795 self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001796
1797 self.stampcache = {}
1798
1799 initial_covered = self.rq.scenequeue_covered.copy()
1800
1801 # Mark initial buildable tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001802 for tid in self.rqdata.runtaskentries:
1803 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1804 self.runq_buildable.add(tid)
1805 if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
1806 self.rq.scenequeue_covered.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001807
1808 found = True
1809 while found:
1810 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001811 for tid in self.rqdata.runtaskentries:
1812 if tid in self.rq.scenequeue_covered:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001813 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001814 logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001815
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001816 if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
1817 if tid in self.rq.scenequeue_notcovered:
1818 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001819 found = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001820 self.rq.scenequeue_covered.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001821
1822 logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
1823
1824 # Allow the metadata to elect for setscene tasks to run anyway
1825 covered_remove = set()
1826 if self.rq.setsceneverify:
1827 invalidtasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001828 tasknames = {}
1829 fns = {}
1830 for tid in self.rqdata.runtaskentries:
1831 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1832 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
1833 fns[tid] = taskfn
1834 tasknames[tid] = taskname
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001835 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1836 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001837 if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
1838 logger.debug(2, 'Setscene stamp current for task %s', tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001839 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001840 if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
1841 logger.debug(2, 'Normal stamp current for task %s', tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001842 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001843 invalidtasks.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001844
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001845 call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001846 locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001847 covered_remove = bb.utils.better_eval(call, locs)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001848
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001849 def removecoveredtask(tid):
1850 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1851 taskname = taskname + '_setscene'
1852 bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
1853 self.rq.scenequeue_covered.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001854
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001855 toremove = covered_remove | self.rq.scenequeue_notcovered
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001856 for task in toremove:
1857 logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
1858 while toremove:
1859 covered_remove = []
1860 for task in toremove:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001861 if task in self.rq.scenequeue_covered:
1862 removecoveredtask(task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001863 for deptask in self.rqdata.runtaskentries[task].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001864 if deptask not in self.rq.scenequeue_covered:
1865 continue
1866 if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
1867 continue
1868 logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
1869 covered_remove.append(deptask)
1870 toremove = covered_remove
1871
1872 logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
1873
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001874
1875 for mc in self.rqdata.dataCaches:
1876 target_pairs = []
1877 for tid in self.rqdata.target_tids:
1878 (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
1879 if tidmc == mc:
1880 target_pairs.append((fn, taskname))
1881
1882 event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001883
1884 schedulers = self.get_schedulers()
1885 for scheduler in schedulers:
1886 if self.scheduler == scheduler.name:
1887 self.sched = scheduler(self, self.rqdata)
1888 logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
1889 break
1890 else:
1891 bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
1892 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1893
1894 def get_schedulers(self):
1895 schedulers = set(obj for obj in globals().values()
1896 if type(obj) is type and
1897 issubclass(obj, RunQueueScheduler))
1898
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001899 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001900 if user_schedulers:
1901 for sched in user_schedulers.split():
1902 if not "." in sched:
1903 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1904 continue
1905
1906 modname, name = sched.rsplit(".", 1)
1907 try:
1908 module = __import__(modname, fromlist=(name,))
1909 except ImportError as exc:
1910 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1911 raise SystemExit(1)
1912 else:
1913 schedulers.add(getattr(module, name))
1914 return schedulers
1915
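    # BB_SCHEDULERS can extend the built-in schedulers with dotted import
    # paths, e.g. (hypothetical names):
    #
    #   BB_SCHEDULERS = "mylayer.sched.MyScheduler"
    #   BB_SCHEDULER = "myscheduler"    # matched against the class's .name
    #
    # Each entry is imported with __import__ and is expected to provide the
    # RunQueueScheduler interface; selection happens by the .name attribute in
    # __init__ above.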
1916 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001917 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001918 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001919
1920 def task_completeoutright(self, task):
1921 """
1922 Mark a task as completed
1923 Look at the reverse dependencies and mark any task with
1924 completed dependencies as buildable
1925 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001926 self.runq_complete.add(task)
1927 for revdep in self.rqdata.runtaskentries[task].revdeps:
1928 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001929 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001930 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001931 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001932 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001933 for dep in self.rqdata.runtaskentries[revdep].depends:
1934 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001935 alldeps = False
1936 break
1937 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001938 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001939 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001940
1941 def task_complete(self, task):
1942 self.stats.taskCompleted()
1943 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1944 self.task_completeoutright(task)
1945
1946 def task_fail(self, task, exitcode):
1947 """
1948 Called when a task has failed
1949 Updates the state engine with the failure
1950 """
1951 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001952 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001953 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001954 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001955 self.rq.state = runQueueCleanUp
1956
1957 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001958 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001959 self.setbuildable(task)
1960 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1961 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001962 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001963 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001964
1965 def execute(self):
1966 """
1967 Run the tasks in a queue prepared by rqdata.prepare()
1968 """
1969
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001970 if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001971 self.rqdata.setscenewhitelist_checked = True
1972
1973 # Check tasks that are going to run against the whitelist
1974 def check_norun_task(tid, showerror=False):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001975 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001976 # Ignore covered tasks
1977 if tid in self.rq.scenequeue_covered:
1978 return False
1979 # Ignore stamped tasks
1980 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
1981 return False
1982 # Ignore noexec tasks
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001983 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001984 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1985 return False
1986
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001987 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001988 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
1989 if showerror:
1990 if tid in self.rqdata.runq_setscene_tids:
1991 logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
1992 else:
1993 logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
1994 return True
1995 return False
1996 # Look to see if any tasks that we think shouldn't run are going to
1997 unexpected = False
1998 for tid in self.rqdata.runtaskentries:
1999 if check_norun_task(tid):
2000 unexpected = True
2001 break
2002 if unexpected:
2003 # Run through the tasks in the rough order they'd have executed and print errors
2004 # (since the order can be useful - usually missing sstate for the last few tasks
2005 # is the cause of the problem)
2006 task = self.sched.next()
2007 while task is not None:
2008 check_norun_task(task, showerror=True)
2009 self.task_skip(task, 'Setscene enforcement check')
2010 task = self.sched.next()
2011
2012 self.rq.state = runQueueCleanUp
2013 return True
2014
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002015 self.rq.read_workers()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002016
2017 if self.stats.total == 0:
2018 # nothing to do
2019 self.rq.state = runQueueCleanUp
2020
2021 task = self.sched.next()
2022 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002023 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002024
2025 if task in self.rq.scenequeue_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002026 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002027 self.task_skip(task, "covered")
2028 return True
2029
2030 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002031 logger.debug(2, "Stamp current task %s", task)
2032
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002033 self.task_skip(task, "existing")
2034 return True
2035
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002036 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002037 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2038 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2039 noexec=True)
2040 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002041 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002042 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002043 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002044 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002045 self.task_complete(task)
2046 return True
2047 else:
2048 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2049 bb.event.fire(startevent, self.cfgData)
2050
2051 taskdepdata = self.build_taskdepdata(task)
2052
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002053 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop19323692019-04-05 15:28:33 -04002054 taskhash = self.rqdata.get_task_hash(task)
2055 unihash = self.rqdata.get_task_unihash(task)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002056 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002057 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002058 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002059 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002060 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002061 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002062 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002063 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002064 return True
Brad Bishop19323692019-04-05 15:28:33 -04002065 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002066 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002067 else:
Brad Bishop19323692019-04-05 15:28:33 -04002068 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002069 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002070
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002071 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2072 self.build_stamps2.append(self.build_stamps[task])
2073 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002074 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002075 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002076 return True
2077
2078 if self.stats.active > 0:
2079 self.rq.read_workers()
2080 return self.rq.active_fds()
2081
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002082 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002083 self.rq.state = runQueueFailed
2084 return True
2085
2086 # Sanity Checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002087 for task in self.rqdata.runtaskentries:
2088 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002089 logger.error("Task %s never buildable!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002090 if task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002091 logger.error("Task %s never ran!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002092 if task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002093 logger.error("Task %s never completed!", task)
2094 self.rq.state = runQueueComplete
2095
2096 return True
2097
Andrew Geissler99467da2019-02-25 18:54:23 -06002098 def filtermcdeps(self, task, deps):
2099 ret = set()
2100 mainmc = mc_from_tid(task)
2101 for dep in deps:
2102 mc = mc_from_tid(dep)
2103 if mc != mainmc:
2104 continue
2105 ret.add(dep)
2106 return ret
2107
2108 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
2109 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002110 def build_taskdepdata(self, task):
2111 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002112 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002113 next.add(task)
Andrew Geissler99467da2019-02-25 18:54:23 -06002114 next = self.filtermcdeps(task, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002115 while next:
2116 additional = []
2117 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002118 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2119 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2120 deps = self.rqdata.runtaskentries[revdep].depends
2121 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002122 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002123 unihash = self.rqdata.runtaskentries[revdep].unihash
Andrew Geissler99467da2019-02-25 18:54:23 -06002124 deps = self.filtermcdeps(task, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002125 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002126 for revdep2 in deps:
2127 if revdep2 not in taskdepdata:
2128 additional.append(revdep2)
2129 next = additional
2130
2131 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2132 return taskdepdata
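        # Shape of each entry built above, keyed by dependency tid:
        #
        #   taskdepdata[tid] = [pn, taskname, fn, deps, provides, taskhash, unihash]
        #
        # Multiconfig cross-dependencies have already been filtered out by
        # filtermcdeps(), so consumers (typically via BB_TASKDEPDATA) only see
        # tids from the task's own multiconfig.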
2133
2134class RunQueueExecuteScenequeue(RunQueueExecute):
2135 def __init__(self, rq):
2136 RunQueueExecute.__init__(self, rq)
2137
2138 self.scenequeue_covered = set()
2139 self.scenequeue_notcovered = set()
2140 self.scenequeue_notneeded = set()
2141
2142 # If we don't have any setscene functions, skip this step
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002143 if len(self.rqdata.runq_setscene_tids) == 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002144 rq.scenequeue_covered = set()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002145 rq.scenequeue_notcovered = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002146 rq.state = runQueueRunInit
2147 return
2148
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002149 self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002150
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002151 sq_revdeps = {}
2152 sq_revdeps_new = {}
2153 sq_revdeps_squash = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002154 self.sq_harddeps = {}
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002155 self.stamps = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002156
2157 # We need to construct a dependency graph for the setscene functions. Intermediate
2158 # dependencies between the setscene tasks only complicate the code. This code
2159 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2160 # only containing the setscene functions.
2161
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002162 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002163
2164 # First process the chains up to the first setscene task.
2165 endpoints = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002166 for tid in self.rqdata.runtaskentries:
2167 sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
2168 sq_revdeps_new[tid] = set()
2169 if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
2170 #bb.warn("Added endpoint %s" % (tid))
2171 endpoints[tid] = set()
2172
2173 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002174
2175 # Secondly process the chains between setscene tasks.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002176 for tid in self.rqdata.runq_setscene_tids:
2177 #bb.warn("Added endpoint 2 %s" % (tid))
2178 for dep in self.rqdata.runtaskentries[tid].depends:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002179 if tid in sq_revdeps[dep]:
2180 sq_revdeps[dep].remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002181 if dep not in endpoints:
2182 endpoints[dep] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002183 #bb.warn(" Added endpoint 3 %s" % (dep))
2184 endpoints[dep].add(tid)
2185
2186 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002187
2188 def process_endpoints(endpoints):
2189 newendpoints = {}
2190 for point, task in endpoints.items():
2191 tasks = set()
2192 if task:
2193 tasks |= task
2194 if sq_revdeps_new[point]:
2195 tasks |= sq_revdeps_new[point]
2196 sq_revdeps_new[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002197 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002198 sq_revdeps_new[point] = tasks
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05002199 tasks = set()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002200 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002201 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002202 if point in sq_revdeps[dep]:
2203 sq_revdeps[dep].remove(point)
2204 if tasks:
2205 sq_revdeps_new[dep] |= tasks
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002206 if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002207 newendpoints[dep] = task
2208 if len(newendpoints) != 0:
2209 process_endpoints(newendpoints)
2210
2211 process_endpoints(endpoints)
2212
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002213 self.rqdata.init_progress_reporter.next_stage()
2214
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002215 # Build a list of setscene tasks which are "unskippable"
2216 # These are direct endpoints referenced by the build
2217 endpoints2 = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002218 sq_revdeps2 = {}
2219 sq_revdeps_new2 = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002220 def process_endpoints2(endpoints):
2221 newendpoints = {}
2222 for point, task in endpoints.items():
2223 tasks = set([point])
2224 if task:
2225 tasks |= task
2226 if sq_revdeps_new2[point]:
2227 tasks |= sq_revdeps_new2[point]
2228 sq_revdeps_new2[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002229 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002230 sq_revdeps_new2[point] = tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002231 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002232 if point in sq_revdeps2[dep]:
2233 sq_revdeps2[dep].remove(point)
2234 if tasks:
2235 sq_revdeps_new2[dep] |= tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002236 if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002237 newendpoints[dep] = tasks
2238 if len(newendpoints) != 0:
2239 process_endpoints2(newendpoints)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002240 for tid in self.rqdata.runtaskentries:
2241 sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
2242 sq_revdeps_new2[tid] = set()
2243 if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
2244 endpoints2[tid] = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002245 process_endpoints2(endpoints2)
2246 self.unskippable = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002247 for tid in self.rqdata.runq_setscene_tids:
2248 if sq_revdeps_new2[tid]:
2249 self.unskippable.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002250
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002251 self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
2252
2253 for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
2254 if tid in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002255 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002256 for dep in sq_revdeps_new[tid]:
2257 deps.add(dep)
2258 sq_revdeps_squash[tid] = deps
2259 elif len(sq_revdeps_new[tid]) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002260 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002261 self.rqdata.init_progress_reporter.update(taskcounter)
2262
2263 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002264
2265 # Resolve setscene inter-task dependencies
2266 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2267 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002268 for tid in self.rqdata.runq_setscene_tids:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002269 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2270 realtid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002271 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002272 self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002273 for (depname, idependtask) in idepends:
2274
2275 if depname not in self.rqdata.taskData[mc].build_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002276 continue
2277
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002278 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2279 if depfn is None:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002280 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002281 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2282 if deptid not in self.rqdata.runtaskentries:
2283 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002284
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002285 if not deptid in self.sq_harddeps:
2286 self.sq_harddeps[deptid] = set()
2287 self.sq_harddeps[deptid].add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002288
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002289 sq_revdeps_squash[tid].add(deptid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002290 # Have to zero this to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002291 sq_revdeps_squash[deptid] = set()
2292
2293 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002294
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n   %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data))

        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

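        # Invert the squashed reverse dependencies to build sq_deps: for each
        # setscene task, the set of tasks waiting on it.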
        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

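        # Setscene tasks with no outstanding dependencies are immediately buildable.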
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

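        # When a hash validation function is available, work out which setscene
        # tasks are already covered by stamps or are noexec, then ask it which of
        # the rest have artefacts available; anything else is failed outright so
        # the real task runs instead.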
        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_unihash = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
                sq_taskname.append(taskname)
                sq_task.append(tid)

            self.cooker.data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", len(stamppresent))

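            # validate_hash returns indices into the sq_* lists identifying the
            # tasks whose artefacts are available.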
            valid = self.rq.validate_hash(sq_fn=sq_fn, sq_task=sq_taskname, sq_hash=sq_hash, sq_hashfn=sq_hashfn,
                    siginfo=False, sq_unihash=sq_unihash, d=self.cooker.data)

            self.cooker.data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
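        """
        Remove 'task' from the outstanding dependencies of the tasks waiting
        on it, marking them buildable once nothing remains. On failure, hard
        dependents are not unblocked; the update cascades through them so
        they are effectively skipped as well.
        """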
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
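        """
        When setscene enforcement is in effect, a setscene failure for a task
        that is not whitelisted is fatal, so move the runqueue into cleanup.
        """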
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
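        """
        Handle a setscene task that ran and failed: fire the failure event,
        mark it as not covered and update the tasks waiting on it.
        """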
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
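        """
        Fail a setscene task without running it (for example when no artefact
        is available) so the real task gets executed instead.
        """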
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
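        """
        Skip a setscene task whose output is already present (existing stamp
        or noexec task) and count it as covered.
        """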
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.can_start_task():
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

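            # Fire the start event and hand the task over to a worker, using the
            # fakeroot worker for this multiconfig if the task requires fakeroot.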
            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            taskhash = self.rqdata.get_task_hash(task)
            unihash = self.rqdata.get_task_unihash(task)
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if mc not in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.can_start_task():
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
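        """
        Build the taskdepdata structure handed to the worker for this setscene
        task: a dict mapping each task in its setscene dependency chain to
        [pn, taskname, fn, deps, provides, taskhash, unihash].
        """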
        def getsetscenedeps(tid):
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

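        # Breadth-first walk over the setscene dependency graph starting from this
        # task, collecting metadata for every task reached.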
        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                unihash = self.rqdata.runtaskentries[revdep].unihash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata

class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        self.args = x


class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)

class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)

class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)

class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)

class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)

class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        self.stats = stats.copy()
        bb.event.Event.__init__(self)

class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """

class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    """

class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason

class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        self.rqexec = rqexec

    def read(self):
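        """
        Check that the worker processes are still alive, pull any pending
        data off the pipe and handle every complete message it contains.
        Returns True if new data was read.
        """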
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

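        # Append any bytes currently available on the pipe; EAGAIN simply means
        # nothing is waiting yet.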
        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
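        # Messages from the worker are pickled payloads framed by <event>...</event>
        # or <exitcode>...</exitcode> markers; handle every complete frame in the buffer.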
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()

def get_setscene_enforce_whitelist(d):
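    """
    Return the list of pn:taskname entries allowed to fall back to real tasks
    when BB_SETSCENE_ENFORCE is set, or None when enforcement is disabled.
    Entries of the form '%:taskname' are expanded against the command line targets.
    """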
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    for item in whitelist[:]:
        if item.startswith('%:'):
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist

def check_setscene_enforce_whitelist(pn, taskname, whitelist):
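    """
    Return True if pn:taskname matches one of the fnmatch patterns in the
    whitelist, or if no whitelist is in force.
    """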
    import fnmatch
    if whitelist is not None:
        item = '%s:%s' % (pn, taskname)
        for whitelist_item in whitelist:
            if fnmatch.fnmatch(item, whitelist_item):
                return True
        return False
    return True