blob: 480a851ef911263cfb04184c9e75691a598b72d4 [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001#!/usr/bin/env python
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4"""
5BitBake 'RunQueue' implementation
6
7Handles preparation and execution of a queue of tasks
8"""
9
10# Copyright (C) 2006-2007 Richard Purdie
11#
12# This program is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 2 as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License along
22# with this program; if not, write to the Free Software Foundation, Inc.,
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25import copy
26import os
27import sys
28import signal
29import stat
30import fcntl
31import errno
32import logging
33import re
34import bb
35from bb import msg, data, event
36from bb import monitordisk
37import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060038import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050039from multiprocessing import Process
Patrick Williamsc124f4f2015-09-15 14:41:29 -050040
# Module-level loggers: "BitBake" is the root BitBake logger; RunQueue
# messages go to a dedicated child logger so they can be filtered separately.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 32-character hex string (an md5-style checksum),
# case-insensitively, that is not embedded in a longer alphanumeric run.
__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
45
def fn_from_tid(tid):
    """Return the filename portion of a task id of the form "<fn>:<taskname>"."""
    parts = tid.rsplit(":", 1)
    return parts[0]
48
def taskname_from_tid(tid):
    """Return the task name portion of a task id of the form "<fn>:<taskname>"."""
    parts = tid.rsplit(":", 1)
    return parts[1]
51
def split_tid(tid):
    """Return (mc, fn, taskname) for *tid*, discarding the mcfn element."""
    return split_tid_mcfn(tid)[:3]
55
def split_tid_mcfn(tid):
    """
    Split a task id into its (mc, fn, taskname, mcfn) components.

    Plain ids look like "<fn>:<taskname>"; multiconfig ids look like
    "multiconfig:<mc>:<fn>:<taskname>".  mc is "" for plain ids and mcfn
    is the filename with the multiconfig prefix reattached (or just fn).
    """
    if tid.startswith('multiconfig:'):
        elems = tid.split(':')
        mc = elems[1]
        taskname = elems[-1]
        # fn itself may contain colons, so rejoin everything in between.
        fn = ":".join(elems[2:-1])
        mcfn = "multiconfig:" + mc + ":" + fn
        return (mc, fn, taskname, mcfn)
    parts = tid.rsplit(":", 1)
    return ("", parts[0], parts[1], parts[0])
71
def build_tid(mc, fn, taskname):
    """Build a task id from its components; inverse of split_tid_mcfn()."""
    suffix = fn + ":" + taskname
    if not mc:
        return suffix
    return "multiconfig:" + mc + ":" + suffix
76
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Lifecycle counters for the tasks in the queue.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow duplicate carrying the same counter values."""
        duplicate = self.__class__(self.total)
        duplicate.__dict__.update(self.__dict__)
        return duplicate

    def taskFailed(self):
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number = 1):
        self.active -= number
        self.completed += number

    def taskSkipped(self, number = 1):
        # Skipped tasks are counted as (briefly) active as well; the
        # caller balances this with a matching taskCompleted().
        self.active += number
        self.skipped += number

    def taskActive(self):
        self.active += 1
107
# These values indicate the next step due to be run in the
# runQueue state machine
# NOTE(review): numbering starts at 2; lower values presumably existed
# historically — confirm before relying on the absolute values.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: materialise the keys into a real list.  The previous code,
        # "[self.rqdata.runtaskentries.keys()]", built a ONE-element list
        # containing the dict-keys view, so prio_map.index(tid) below could
        # never find any tid and raised ValueError.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Lazily-built inverse of prio_map (tid -> priority index).
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop anything that has started running since the last call.
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            stamp = self.stamps[tid]
            # Don't run two tasks sharing a stamp file concurrently.
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the buildable task with the lowest priority index whose
        # stamp file is not already being written by a running task.
        best = None
        bestprio = None
        for tid in self.buildable:
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Mark *task* as now being buildable."""
        self.buildable.append(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of *taskid* for debugging."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Log the full priority map (most important task first) at debug level 3."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
198
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        Order the priority map by task weight, heaviest first.

        A stable ascending sort followed by a reversal is equivalent to the
        original bucket-by-weight build: heaviest tasks end up first, and
        equally-weighted tasks appear in reversed insertion order.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        entries = self.rqdata.runtaskentries
        self.prio_map = sorted(entries, key=lambda tid: entries[tid].weight)
        self.prio_map.reverse()
225
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            # index/old_task track our current merge position in all_tasks.
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        #
        # Stable-partition prio_map by task kind, in all_tasks order:
        # matching tasks are moved (in their existing relative order) to
        # the front, after the ones already moved in earlier iterations.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500322
class RunTaskEntry(object):
    """Per-task bookkeeping record stored in RunQueueData.runtaskentries."""
    def __init__(self):
        # Forward and reverse dependency sets of task ids, populated
        # while the runqueue data is being prepared.
        self.depends = set()
        self.revdeps = set()
        # Relative scheduling weight; starts at 1 and is recomputed later.
        self.weight = 1
        # Signature hash and task reference; None until assigned.
        self.hash = None
        self.task = None
330
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500331class RunQueueData:
332 """
333 BitBake Run Queue implementation
334 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Collect the inputs needed to build the runqueue: the owning RunQueue,
        the cooker, configuration data, per-multiconfig data caches, taskData
        and the requested build targets.
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Configuration knobs read once up-front from the datastore.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # Placeholder progress reporter; replaced by the caller when real
        # progress reporting is wanted.
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
351
    def reset(self):
        """Discard any previously computed task entries (tid -> RunTaskEntry)."""
        self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500354
355 def runq_depends_names(self, ids):
356 import re
357 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600358 for id in ids:
359 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500360 nam = re.sub("_[^,]*,", ",", nam)
361 ret.extend([nam])
362 return ret
363
    def get_task_hash(self, tid):
        """Return the signature hash recorded for task *tid* (None if unset)."""
        return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500366
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600367 def get_user_idstring(self, tid, task_name_suffix = ""):
368 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500369
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500370 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500371 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
372 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600373 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500374 return "%s:%s" % (pn, taskname)
375
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing up to 10 distinct
        dependency loops found while walking the reverse dependencies of
        the given unbuildable tasks.
        """
        # NOTE(review): this local deepcopy import is unused — the code below
        # calls copy.deepcopy via the module-level `import copy`.
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            # Rotate the chain so it starts at the lowest element; loops are
            # cyclic, so rotation preserves the loop while canonicalising it.
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies; a revdep already
            # present in prev_chain closes a dependency loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                # Only recurse into a revdep if it could contribute a new loop:
                # it is unexplored, self-referential, or depends on something
                # already on the current chain.
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            # Memoise the transitive reverse dependencies seen from tid.
            explored_deps[tid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
462
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        *endpoints* is the set of tids with no reverse dependencies; weights
        propagate from there down each task's depends.  Returns the weight
        dict and stores each weight on its RunTaskEntry.  Calls
        bb.msg.fatal() if unbuildable (circular) tasks are detected.
        """
        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Seed the endpoints with a larger base weight.
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Breadth-first propagation: a task's weight accumulates the weights
        # of everything that (transitively) depends on it.  A task joins the
        # next frontier only once all of its reverse dependencies are done.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
522
523 def prepare(self):
524 """
525 Turn a set of taskData into a RunQueue and compute data needed
526 to optimise the execution order.
527 """
528
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600529 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500530 recursivetasks = {}
531 recursiveitasks = {}
532 recursivetasksselfref = set()
533
534 taskData = self.taskData
535
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600536 found = False
537 for mc in self.taskData:
538 if len(taskData[mc].taskentries) > 0:
539 found = True
540 break
541 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500542 # Nothing to do
543 return 0
544
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600545 self.init_progress_reporter.start()
546 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500547
548 # Step A - Work out a list of tasks to run
549 #
550 # Taskdata gives us a list of possible providers for every build and run
551 # target ordered by priority. It also gives information on each of those
552 # providers.
553 #
554 # To create the actual list of tasks to execute we fix the list of
555 # providers and then resolve the dependencies into task IDs. This
556 # process is repeated for each type of dependency (tdepends, deptask,
557 # rdeptast, recrdeptask, idepends).
558
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600559 def add_build_dependencies(depids, tasknames, depends, mc):
560 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500561 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600562 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500563 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600564 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500565 if depdata is None:
566 continue
567 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 t = depdata + ":" + taskname
569 if t in taskData[mc].taskentries:
570 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500571
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600572 def add_runtime_dependencies(depids, tasknames, depends, mc):
573 for depname in depids:
574 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500575 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600576 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500577 if depdata is None:
578 continue
579 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600580 t = depdata + ":" + taskname
581 if t in taskData[mc].taskentries:
582 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500583
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600584 for mc in taskData:
585 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500586
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600587 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
588 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500589
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600590 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
591
592 depends = set()
593 task_deps = self.dataCaches[mc].task_deps[taskfn]
594
595 self.runtaskentries[tid] = RunTaskEntry()
596
597 if fn in taskData[mc].failed_fns:
598 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500599
600 # Resolve task internal dependencies
601 #
602 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600603 for t in taskData[mc].taskentries[tid].tdepends:
604 (_, depfn, deptaskname, _) = split_tid_mcfn(t)
605 depends.add(build_tid(mc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500606
607 # Resolve 'deptask' dependencies
608 #
609 # e.g. do_sometask[deptask] = "do_someothertask"
610 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600611 if 'deptask' in task_deps and taskname in task_deps['deptask']:
612 tasknames = task_deps['deptask'][taskname].split()
613 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614
615 # Resolve 'rdeptask' dependencies
616 #
617 # e.g. do_sometask[rdeptask] = "do_someothertask"
618 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600619 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
620 tasknames = task_deps['rdeptask'][taskname].split()
621 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500622
623 # Resolve inter-task dependencies
624 #
625 # e.g. do_sometask[depends] = "targetname:do_someothertask"
626 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600627 idepends = taskData[mc].taskentries[tid].idepends
628 for (depname, idependtask) in idepends:
629 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500630 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600631 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500632 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600633 t = depdata + ":" + idependtask
634 depends.add(t)
635 if t not in taskData[mc].taskentries:
636 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
637 irdepends = taskData[mc].taskentries[tid].irdepends
638 for (depname, idependtask) in irdepends:
639 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500640 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500641 if not taskData[mc].run_targets[depname]:
642 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600643 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500644 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600645 t = depdata + ":" + idependtask
646 depends.add(t)
647 if t not in taskData[mc].taskentries:
648 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649
650 # Resolve recursive 'recrdeptask' dependencies (Part A)
651 #
652 # e.g. do_sometask[recrdeptask] = "do_someothertask"
653 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
654 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600655 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
656 tasknames = task_deps['recrdeptask'][taskname].split()
657 recursivetasks[tid] = tasknames
658 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
659 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
660 if taskname in tasknames:
661 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500662
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600663 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
664 recursiveitasks[tid] = []
665 for t in task_deps['recideptask'][taskname].split():
666 newdep = build_tid(mc, fn, t)
667 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600669 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400670 # Remove all self references
671 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500672
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600673 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500674
Brad Bishop316dfdd2018-06-25 12:45:53 -0400675 self.init_progress_reporter.next_stage()
676
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500677 # Resolve recursive 'recrdeptask' dependencies (Part B)
678 #
679 # e.g. do_sometask[recrdeptask] = "do_someothertask"
680 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600681 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600682
Brad Bishop316dfdd2018-06-25 12:45:53 -0400683 # Generating/interating recursive lists of dependencies is painful and potentially slow
684 # Precompute recursive task dependencies here by:
685 # a) create a temp list of reverse dependencies (revdeps)
686 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
687 # c) combine the total list of dependencies in cumulativedeps
688 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500689
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500690
Brad Bishop316dfdd2018-06-25 12:45:53 -0400691 revdeps = {}
692 deps = {}
693 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600694 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400695 deps[tid] = set(self.runtaskentries[tid].depends)
696 revdeps[tid] = set()
697 cumulativedeps[tid] = set()
698 # Generate a temp list of reverse dependencies
699 for tid in self.runtaskentries:
700 for dep in self.runtaskentries[tid].depends:
701 revdeps[dep].add(tid)
702 # Find the dependency chain endpoints
703 endpoints = set()
704 for tid in self.runtaskentries:
705 if len(deps[tid]) == 0:
706 endpoints.add(tid)
707 # Iterate the chains collating dependencies
708 while endpoints:
709 next = set()
710 for tid in endpoints:
711 for dep in revdeps[tid]:
712 cumulativedeps[dep].add(fn_from_tid(tid))
713 cumulativedeps[dep].update(cumulativedeps[tid])
714 if tid in deps[dep]:
715 deps[dep].remove(tid)
716 if len(deps[dep]) == 0:
717 next.add(dep)
718 endpoints = next
719 #for tid in deps:
720 # if len(deps[tid]) != 0:
721 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
722
723 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
724 # resolve these recursively until we aren't adding any further extra dependencies
725 extradeps = True
726 while extradeps:
727 extradeps = 0
728 for tid in recursivetasks:
729 tasknames = recursivetasks[tid]
730
731 totaldeps = set(self.runtaskentries[tid].depends)
732 if tid in recursiveitasks:
733 totaldeps.update(recursiveitasks[tid])
734 for dep in recursiveitasks[tid]:
735 if dep not in self.runtaskentries:
736 continue
737 totaldeps.update(self.runtaskentries[dep].depends)
738
739 deps = set()
740 for dep in totaldeps:
741 if dep in cumulativedeps:
742 deps.update(cumulativedeps[dep])
743
744 for t in deps:
745 for taskname in tasknames:
746 newtid = t + ":" + taskname
747 if newtid == tid:
748 continue
749 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
750 extradeps += 1
751 self.runtaskentries[tid].depends.add(newtid)
752
753 # Handle recursive tasks which depend upon other recursive tasks
754 deps = set()
755 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
756 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
757 for newtid in deps:
758 for taskname in tasknames:
759 if not newtid.endswith(":" + taskname):
760 continue
761 if newtid in self.runtaskentries:
762 extradeps += 1
763 self.runtaskentries[tid].depends.add(newtid)
764
765 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
766
767 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
768 for tid in recursivetasksselfref:
769 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600770
771 self.init_progress_reporter.next_stage()
772
773 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500774
775 # Step B - Mark all active tasks
776 #
777 # Start with the tasks we were asked to run and mark all dependencies
778 # as active too. If the task is to be 'forced', clear its stamp. Once
779 # all active tasks are marked, prune the ones we don't need.
780
781 logger.verbose("Marking Active Tasks")
782
        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)

            tid:   task id ("<fn>:<taskname>") to mark as needed for the build
            depth: recursion depth, carried for bookkeeping only

            Records the task in the enclosing scope's 'runq_build' dict; tasks
            not present there are pruned afterwards in Step C.
            """

            # Already marked (or a dependency cycle brought us back) - stop here
            # so each task is visited at most once.
            if tid in runq_build:
                return

            runq_build[tid] = 1

            # Recurse into everything this task depends on so the whole
            # dependency subtree is kept.
            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)
797
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600798 self.target_tids = []
799 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500800
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600801 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500802 continue
803
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600804 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500805 continue
806
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500807 parents = False
808 if task.endswith('-'):
809 parents = True
810 task = task[:-1]
811
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600812 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500813 continue
814
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600815 # fn already has mc prefix
816 tid = fn + ":" + task
817 self.target_tids.append(tid)
818 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500819 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600820 tasks = []
821 for x in taskData[mc].taskentries:
822 if x.startswith(fn + ":"):
823 tasks.append(taskname_from_tid(x))
824 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500825 if close_matches:
826 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
827 else:
828 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600829 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
830
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500831 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500832 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600833 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500834 mark_active(i, 1)
835 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600836 mark_active(tid, 1)
837
838 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500839
840 # Step C - Prune all inactive tasks
841 #
842 # Once all active tasks are marked, prune the ones we don't need.
843
Brad Bishop316dfdd2018-06-25 12:45:53 -0400844 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600845 for tid in list(self.runtaskentries.keys()):
846 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400847 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600848 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600849
Brad Bishop316dfdd2018-06-25 12:45:53 -0400850 # Handle --runall
851 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500852 # re-run the mark_active and then drop unused tasks from new list
853 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400854
855 for task in self.cooker.configuration.runall:
856 runall_tids = set()
857 for tid in list(self.runtaskentries):
858 wanttid = fn_from_tid(tid) + ":do_%s" % task
859 if wanttid in delcount:
860 self.runtaskentries[wanttid] = delcount[wanttid]
861 if wanttid in self.runtaskentries:
862 runall_tids.add(wanttid)
863
864 for tid in list(runall_tids):
865 mark_active(tid,1)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500866
867 for tid in list(self.runtaskentries.keys()):
868 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400869 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500870 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500871
872 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400873 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
874
875 self.init_progress_reporter.next_stage()
876
877 # Handle runonly
878 if self.cooker.configuration.runonly:
879 # re-run the mark_active and then drop unused tasks from new list
880 runq_build = {}
881
882 for task in self.cooker.configuration.runonly:
883 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
884
885 for tid in list(runonly_tids):
886 mark_active(tid,1)
887
888 for tid in list(self.runtaskentries.keys()):
889 if tid not in runq_build:
890 delcount[tid] = self.runtaskentries[tid]
891 del self.runtaskentries[tid]
892
893 if len(self.runtaskentries) == 0:
894 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500895
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500896 #
897 # Step D - Sanity checks and computation
898 #
899
900 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600901 if len(self.runtaskentries) == 0:
902 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500903 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
904 else:
905 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
906
Brad Bishop316dfdd2018-06-25 12:45:53 -0400907 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500908
909 logger.verbose("Assign Weightings")
910
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600911 self.init_progress_reporter.next_stage()
912
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500913 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600914 for tid in self.runtaskentries:
915 for dep in self.runtaskentries[tid].depends:
916 self.runtaskentries[dep].revdeps.add(tid)
917
918 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500919
920 # Identify tasks at the end of dependency chains
921 # Error on circular dependency loops (length two)
922 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600923 for tid in self.runtaskentries:
924 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500925 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600926 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500927 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600928 if dep in self.runtaskentries[tid].depends:
929 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
930
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500931
932 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
933
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600934 self.init_progress_reporter.next_stage()
935
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500936 # Calculate task weights
937 # Check of higher length circular dependencies
938 self.runq_weight = self.calculate_task_weights(endpoints)
939
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600940 self.init_progress_reporter.next_stage()
941
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500942 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600943 for mc in self.dataCaches:
944 prov_list = {}
945 seen_fn = []
946 for tid in self.runtaskentries:
947 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
948 if taskfn in seen_fn:
949 continue
950 if mc != tidmc:
951 continue
952 seen_fn.append(taskfn)
953 for prov in self.dataCaches[mc].fn_provides[taskfn]:
954 if prov not in prov_list:
955 prov_list[prov] = [taskfn]
956 elif taskfn not in prov_list[prov]:
957 prov_list[prov].append(taskfn)
958 for prov in prov_list:
959 if len(prov_list[prov]) < 2:
960 continue
961 if prov in self.multi_provider_whitelist:
962 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500963 seen_pn = []
964 # If two versions of the same PN are being built its fatal, we don't support it.
965 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600966 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500967 if pn not in seen_pn:
968 seen_pn.append(pn)
969 else:
970 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500971 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
972 #
973 # Construct a list of things which uniquely depend on each provider
974 # since this may help the user figure out which dependency is triggering this warning
975 #
976 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
977 deplist = {}
978 commondeps = None
979 for provfn in prov_list[prov]:
980 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600981 for tid in self.runtaskentries:
982 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500983 if fn != provfn:
984 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600985 for dep in self.runtaskentries[tid].revdeps:
986 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500987 if fn == provfn:
988 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600989 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500990 if not commondeps:
991 commondeps = set(deps)
992 else:
993 commondeps &= deps
994 deplist[provfn] = deps
995 for provfn in deplist:
996 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
997 #
998 # Construct a list of provides and runtime providers for each recipe
999 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1000 #
1001 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1002 provide_results = {}
1003 rprovide_results = {}
1004 commonprovs = None
1005 commonrprovs = None
1006 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001007 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001008 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001009 for rprovide in self.dataCaches[mc].rproviders:
1010 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001011 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001012 for package in self.dataCaches[mc].packages:
1013 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001014 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001015 for package in self.dataCaches[mc].packages_dynamic:
1016 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001017 rprovides.add(package)
1018 if not commonprovs:
1019 commonprovs = set(provides)
1020 else:
1021 commonprovs &= provides
1022 provide_results[provfn] = provides
1023 if not commonrprovs:
1024 commonrprovs = set(rprovides)
1025 else:
1026 commonrprovs &= rprovides
1027 rprovide_results[provfn] = rprovides
1028 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1029 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1030 for provfn in prov_list[prov]:
1031 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1032 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1033
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001034 if self.warn_multi_bb:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001035 logger.warning(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001036 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001037 logger.error(msg)
1038
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001039 self.init_progress_reporter.next_stage()
1040
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001041 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001042 self.stampfnwhitelist = {}
1043 for mc in self.taskData:
1044 self.stampfnwhitelist[mc] = []
1045 for entry in self.stampwhitelist.split():
1046 if entry not in self.taskData[mc].build_targets:
1047 continue
1048 fn = self.taskData.build_targets[entry][0]
1049 self.stampfnwhitelist[mc].append(fn)
1050
1051 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001052
1053 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001054 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001055 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001056 for tid in self.runtaskentries:
1057 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001058 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001059 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001060 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001062
        def invalidate_task(tid, error_nostamp):
            """
            Force a task to be considered out of date by invalidating its stamp.

            tid:           task id ("<fn>:<taskname>") to invalidate
            error_nostamp: if True, a 'nostamp' task is a fatal error; if False
                           it is only reported at debug level (nostamp tasks
                           always rerun, so there is nothing to invalidate)
            """
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            # Warn (but continue) if the task isn't known - invalidation of a
            # non-existent task is a no-op the user probably wants to know about.
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                # Delegate to the signature generator, which removes the stamp
                # for this task so it will re-execute.
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
1076
1077 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001078
1079 # Invalidate task if force mode active
1080 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001081 for tid in self.target_tids:
1082 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001083
1084 # Invalidate task if invalidate mode active
1085 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001086 for tid in self.target_tids:
1087 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001088 for st in self.cooker.configuration.invalidate_stamp.split(','):
1089 if not st.startswith("do_"):
1090 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001091 invalidate_task(fn + ":" + st, True)
1092
1093 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001094
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001095 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001096 for mc in taskData:
1097 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1098 virtpnmap = {}
1099 for v in virtmap:
1100 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1101 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1102 if hasattr(bb.parse.siggen, "tasks_resolved"):
1103 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1104
1105 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001106
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001107 # Iterate over the task list and call into the siggen code
1108 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001109 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001110 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001111 for tid in todeal.copy():
1112 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1113 dealtwith.add(tid)
1114 todeal.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001115 procdep = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001116 for dep in self.runtaskentries[tid].depends:
1117 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
1118 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1119 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
1120 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001121
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001122 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001123
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001124 #self.dump_data()
1125 return len(self.runtaskentries)
1126
1127 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001128 """
1129 Dump some debug information on the internal data structures
1130 """
1131 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001132 for tid in self.runtaskentries:
1133 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1134 self.runtaskentries[tid].weight,
1135 self.runtaskentries[tid].depends,
1136 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137
class RunQueueWorker():
    """Pairs a bitbake-worker subprocess handle with the pipe used to read
    events back from it."""
    def __init__(self, process, pipe):
        # process: subprocess.Popen handle; pipe: runQueuePipe for its stdout
        self.process, self.pipe = process, pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142
1143class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
        """
        Set up a RunQueue for the given targets.

        cooker:     the BBCooker driving this build
        cfgData:    configuration datastore (queried for BB_* policy variables)
        dataCaches: per-multiconfig recipe data caches
        taskData:   per-multiconfig TaskData
        targets:    list of (mc, target, task, fn) tuples to build
        """

        self.cooker = cooker
        self.cfgData = cfgData
        # All task-graph construction lives in RunQueueData; this class only
        # drives execution.
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        # Policy/hook functions are configured via the datastore; all optional
        # except the stamp policy which defaults to "perfile".
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        # State machine starts at the preparation stage.
        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneRun, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False
        self.rqexe = None
        # Worker subprocesses, keyed by multiconfig name; fakeworkers run
        # under fakeroot.
        self.worker = {}
        self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001169
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001170 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001171 logger.debug(1, "Starting bitbake-worker")
1172 magic = "decafbad"
1173 if self.cooker.configuration.profile:
1174 magic = "decafbadbad"
1175 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001176 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001177 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001178 fakerootcmd = mcdata.getVar("FAKEROOTCMD")
1179 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001180 env = os.environ.copy()
1181 for key, value in (var.split('=') for var in fakerootenv):
1182 env[key] = value
1183 worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
1184 else:
1185 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1186 bb.utils.nonblockingfd(worker.stdout)
1187 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1188
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001189 runqhash = {}
1190 for tid in self.rqdata.runtaskentries:
1191 runqhash[tid] = self.rqdata.runtaskentries[tid].hash
1192
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001193 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001194 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1195 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1196 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1197 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001198 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001199 "runq_hash" : runqhash,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001200 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1201 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1202 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1203 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1204 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001205 "buildname" : self.cfgData.getVar("BUILDNAME"),
1206 "date" : self.cfgData.getVar("DATE"),
1207 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001208 }
1209
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001210 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001211 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001212 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001213 worker.stdin.flush()
1214
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001215 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001216
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001217 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218 if not worker:
1219 return
1220 logger.debug(1, "Teardown for bitbake-worker")
1221 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001222 worker.process.stdin.write(b"<quit></quit>")
1223 worker.process.stdin.flush()
1224 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001225 except IOError:
1226 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001227 while worker.process.returncode is None:
1228 worker.pipe.read()
1229 worker.process.poll()
1230 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001231 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001232 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001233
1234 def start_worker(self):
1235 if self.worker:
1236 self.teardown_workers()
1237 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001238 for mc in self.rqdata.dataCaches:
1239 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001240
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001241 def start_fakeworker(self, rqexec, mc):
1242 if not mc in self.fakeworker:
1243 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001244
1245 def teardown_workers(self):
1246 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001247 for mc in self.worker:
1248 self._teardown_worker(self.worker[mc])
1249 self.worker = {}
1250 for mc in self.fakeworker:
1251 self._teardown_worker(self.fakeworker[mc])
1252 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001253
1254 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001255 for mc in self.worker:
1256 self.worker[mc].pipe.read()
1257 for mc in self.fakeworker:
1258 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001259
1260 def active_fds(self):
1261 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001262 for mc in self.worker:
1263 fds.append(self.worker[mc].pipe.input)
1264 for mc in self.fakeworker:
1265 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001266 return fds
1267
    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """
        Decide whether the stamp for a task is current, i.e. whether the task
        can be skipped.

        tid:      task id ("<fn>:<taskname>") whose stamp is checked
        taskname: override for the task name portion (defaults to the one
                  embedded in tid)
        recurse:  also validate all dependency stamps recursively
        cache:    dict of previously computed results, used/filled when
                  recursing to avoid re-checking shared dependencies

        Returns True if the stamp exists and is newer than (or excused from
        comparison with) the stamps of its dependencies, False otherwise.
        """
        def get_timestamp(f):
            # mtime of f, or None if it doesn't exist / can't be statted.
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        # Stamp policy: "perfile" only compares stamps within the same recipe;
        # anything else compares across the whole dependency tree, with
        # "whitelist" exempting the whitelisted recipe files.
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene variants (other than do_setscene itself) only need the
        # stamp to exist; no dependency timestamp comparison is done.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A newer (or sole) setscene stamp for the dependency means it
                # was provided from sstate; skip the timestamp comparison.
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                # Only compare against deps in the same recipe unless the
                # policy requested a full-tree (non-whitelisted) comparison.
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        # Dependency ran more recently than this task's stamp.
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                    if recurse and iscurrent:
                        if dep in cache:
                            iscurrent = cache[dep]
                            if not iscurrent:
                                logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                        else:
                            iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                            cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent
1340
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is one iteration of a state machine; the caller loops on it.
        Returns False when the build is complete, otherwise a truthy value
        (or poll fds) meaning "call me again".
        """

        retval = True

        if self.state is runQueuePrepare:
            # A dummy executor stands in until a real one exists, so generic
            # code can always call through self.rqexe.
            self.rqexe = RunQueueExecuteDummy(self)
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            if self.rqdata.prepare() == 0:
                # Nothing to run at all
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                self.rqdata.init_progress_reporter.next_stage()

                # we are ready to run,  emit dependency info to any UI or class which
                # needs it
                depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
                self.rqdata.init_progress_reporter.next_stage()
                bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

        if self.state is runQueueSceneInit:
            # Register the disk monitor heartbeat handler once; it only acts
            # while the queue is actually executing tasks.
            if not self.dm_event_handler_registered:
                res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            dump = self.cooker.configuration.dump_signatures
            if dump:
                # Signature-dump mode: no tasks are executed.
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete
            else:
                self.rqdata.init_progress_reporter.next_stage()
                self.start_worker()
                self.rqdata.init_progress_reporter.next_stage()
                # The setscene executor advances self.state itself.
                self.rqexe = RunQueueExecuteScenequeue(self)

        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            if self.cooker.configuration.setsceneonly:
                self.state = runQueueComplete
            else:
                # Just in case we didn't setscene
                self.rqdata.init_progress_reporter.finish()
                logger.info("Executing RunQueue Tasks")
                self.rqexe = RunQueueExecuteTasks(self)
                self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        # Unregister the heartbeat handler as soon as the build has finished
        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1437
1438 def execute_runqueue(self):
1439 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1440 try:
1441 return self._execute_runqueue()
1442 except bb.runqueue.TaskFailure:
1443 raise
1444 except SystemExit:
1445 raise
1446 except bb.BBHandledException:
1447 try:
1448 self.teardown_workers()
1449 except:
1450 pass
1451 self.state = runQueueComplete
1452 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001453 except Exception as err:
1454 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001455 try:
1456 self.teardown_workers()
1457 except:
1458 pass
1459 self.state = runQueueComplete
1460 raise
1461
1462 def finish_runqueue(self, now = False):
1463 if not self.rqexe:
1464 self.state = runQueueComplete
1465 return
1466
1467 if now:
1468 self.rqexe.finish_now()
1469 else:
1470 self.rqexe.finish()
1471
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001472 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001473 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001474 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1475 siggen = bb.parse.siggen
1476 dataCaches = self.rqdata.dataCaches
1477 siggen.dump_sigfn(fn, dataCaches, options)
1478
1479 def dump_signatures(self, options):
1480 fns = set()
1481 bb.note("Reparsing files to collect dependency data")
1482
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001483 for tid in self.rqdata.runtaskentries:
1484 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001485 fns.add(fn)
1486
1487 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1488 # We cannot use the real multiprocessing.Pool easily due to some local data
1489 # that can't be pickled. This is a cheap multi-process solution.
1490 launched = []
1491 while fns:
1492 if len(launched) < max_process:
1493 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1494 p.start()
1495 launched.append(p)
1496 for q in launched:
1497 # The finished processes are joined when calling is_alive()
1498 if not q.is_alive():
1499 launched.remove(q)
1500 for p in launched:
1501 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001502
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001503 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001504
1505 return
1506
    def print_diffscenetasks(self):
        """Report which tasks cannot be reused from the setscene cache.

        Runs the configured hash-validation hook over all executable tasks,
        then walks the dependency graph to find the *first* invalid tasks
        (those whose dependencies are all valid) and prints them, since the
        differences start there.  Returns the set of those root invalid
        task ids.
        """

        valid = []
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []  # NOTE(review): populated nowhere in this method
        valid_new = set()

        # Build the parallel lists the hash-validation hook expects,
        # excluding noexec tasks (they have nothing to cache).
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
            sq_hash.append(self.rqdata.runtaskentries[tid].hash)
            sq_taskname.append(taskname)
            sq_task.append(tid)
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
        try:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
            valid = bb.utils.better_eval(call, locs)
        # Handle version with no siginfo parameter
        except TypeError:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            valid = bb.utils.better_eval(call, locs)
        # The hook returns indices into the parallel lists
        for v in valid:
            valid_new.add(sq_task[v])

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Everything neither validated nor noexec is invalid
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk down the dependency chain of each invalid task;
        # a task with an invalid dependency is not a "root" cause and lands
        # in 'found'.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Stop early once this tid is known to be non-root
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1589
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid task, locate the closest previously-written
        signature file and print a human-readable diff explaining why the
        cached result could not be used."""

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Compare two sigdata files for *key*, indenting nested output.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            # The sigfile containing the current hash must exist (we wrote it)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current-hash file; what remains are candidate old runs
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                # Pick the most recent remaining sigfile as the comparison base
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1624
class RunQueueExecute:
    """Base class for runqueue executors.

    Holds the state shared by the concrete executors: which task ids are
    buildable/running/complete, stamp bookkeeping, the list of failed tids
    and the configured parallelism.  It also wires the worker pipes back to
    this executor so task completion reports are routed here.

    Note: self.stats is provided by subclasses, not set here.
    """

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Parallelism and scheduler choice come from configuration
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Task ids move through these sets as they progress
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamp files for in-flight tasks: task -> stampfile, plus a flat list
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        self.stampcache = {}

        # Route worker pipe notifications to this executor instance
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        """Handle a worker's report that *task* exited with *status*
        (non-zero status marks the task failed, zero complete)."""

        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        """Tell every worker (real and fakeroot) to stop immediately, then
        set the final runqueue state based on whether any task failed."""
        for mc in self.rq.worker:
            try:
                self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.worker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        for mc in self.rq.fakeworker:
            try:
                self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.fakeworker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """Begin a graceful shutdown.

        While tasks are still active, returns the worker fds to poll so the
        caller keeps draining them; once drained, sets the final state and
        returns True.
        """
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        """Evaluate the dependency-validation hook (self.rq.depvalidate).

        Builds a {tid: [pn, taskname, fn]} mapping for *task* plus its
        *taskdeps* and passes it to the hook.  Note this mutates *taskdeps*
        by adding *task*.  Returns the hook's verdict, or False when no
        hook is configured.
        """
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid
1719
class RunQueueExecuteDummy(RunQueueExecute):
    """Placeholder executor used while the runqueue is still preparing.

    Deliberately skips RunQueueExecute.__init__ (no workers exist yet) and
    carries an empty statistics object so generic reporting code works.
    """

    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        """Nothing is running, so finishing just marks the queue complete."""
        self.rq.state = runQueueComplete
1728
1729class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        """Set up the real-task executor: decide which tasks are already
        covered by setscene results, honour the setsceneverify hook, fire
        stamp update events and pick a scheduler."""
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        # Remember what setscene itself claimed, before we extend the set
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Fixpoint: keep adding tasks whose reverse dependencies are all
        # covered (and which aren't explicitly notcovered) until stable.
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    if tid in self.rq.scenequeue_notcovered:
                        continue
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            # The hook returns the tasks it wants removed from the skip list
            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        def removecoveredtask(tid):
            # Un-cover a task: delete its setscene stamp so it really runs
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskname = taskname + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        # Transitively un-cover dependencies of anything the hook removed
        # (but never tasks setscene itself originally covered).
        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)


        # Tell any listeners which stamps correspond to the build targets,
        # grouped per multiconfig.
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))

            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Instantiate the scheduler selected by BB_SCHEDULER
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1830
1831 def get_schedulers(self):
1832 schedulers = set(obj for obj in globals().values()
1833 if type(obj) is type and
1834 issubclass(obj, RunQueueScheduler))
1835
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001836 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001837 if user_schedulers:
1838 for sched in user_schedulers.split():
1839 if not "." in sched:
1840 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1841 continue
1842
1843 modname, name = sched.rsplit(".", 1)
1844 try:
1845 module = __import__(modname, fromlist=(name,))
1846 except ImportError as exc:
1847 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1848 raise SystemExit(1)
1849 else:
1850 schedulers.add(getattr(module, name))
1851 return schedulers
1852
1853 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001854 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001855 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001856
1857 def task_completeoutright(self, task):
1858 """
1859 Mark a task as completed
1860 Look at the reverse dependencies and mark any task with
1861 completed dependencies as buildable
1862 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001863 self.runq_complete.add(task)
1864 for revdep in self.rqdata.runtaskentries[task].revdeps:
1865 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001866 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001867 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001868 continue
1869 alldeps = 1
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001870 for dep in self.rqdata.runtaskentries[revdep].depends:
1871 if dep not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001872 alldeps = 0
1873 if alldeps == 1:
1874 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001875 fn = fn_from_tid(revdep)
1876 taskname = taskname_from_tid(revdep)
1877 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001878
1879 def task_complete(self, task):
1880 self.stats.taskCompleted()
1881 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1882 self.task_completeoutright(task)
1883
1884 def task_fail(self, task, exitcode):
1885 """
1886 Called when a task has failed
1887 Updates the state engine with the failure
1888 """
1889 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001890 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001891 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001892 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001893 self.rq.state = runQueueCleanUp
1894
1895 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001896 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001897 self.setbuildable(task)
1898 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1899 self.task_completeoutright(task)
1900 self.stats.taskCompleted()
1901 self.stats.taskSkipped()
1902
    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()

        One scheduling step: dispatches at most one new task per call.
        Returns True (or poll fds while waiting on workers) to be called
        again; final state is recorded in self.rq.state.
        """

        # One-off setscene enforcement pass: if a whitelist is configured,
        # verify nothing outside it is about to execute for real.
        if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
            self.rqdata.setscenewhitelist_checked = True

            # Check tasks that are going to run against the whitelist
            def check_norun_task(tid, showerror=False):
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                # Ignore covered tasks
                if tid in self.rq.scenequeue_covered:
                    return False
                # Ignore stamped tasks
                if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                    return False
                # Ignore noexec tasks
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    return False

                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                    if showerror:
                        if tid in self.rqdata.runq_setscene_tids:
                            logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
                        else:
                            logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
                    return True
                return False
            # Look to see if any tasks that we think shouldn't run are going to
            unexpected = False
            for tid in self.rqdata.runtaskentries:
                if check_norun_task(tid):
                    unexpected = True
                    break
            if unexpected:
                # Run through the tasks in the rough order they'd have executed and print errors
                # (since the order can be useful - usually missing sstate for the last few tasks
                # is the cause of the problem)
                task = self.sched.next()
                while task is not None:
                    check_norun_task(task, showerror=True)
                    self.task_skip(task, 'Setscene enforcement check')
                    task = self.sched.next()

                self.rq.state = runQueueCleanUp
                return True

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        # Ask the scheduler for the next runnable task
        task = self.sched.next()
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            # Covered by a setscene result - no need to run it
            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            # Up-to-date stamp - no need to run it
            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks are completed in-process; just write the stamp
                # (unless dry-running or enforcing setscene).
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                # Dispatch to the fakeroot worker when the task needs it,
                # otherwise to the normal worker, as a pickled command on
                # the worker's stdin pipe.
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    if not mc in self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self, mc)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

                self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
                self.build_stamps2.append(self.build_stamps[task])
                self.runq_running.add(task)
                self.stats.taskActive()
                # Room for more parallel tasks? Come straight back for another.
                if self.stats.active < self.number_tasks:
                    return True

        # At capacity (or nothing schedulable): wait on the workers
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
            if task not in self.runq_running:
                logger.error("Task %s never ran!", task)
            if task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True
2033
2034 def build_taskdepdata(self, task):
2035 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002036 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002037 next.add(task)
2038 while next:
2039 additional = []
2040 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002041 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2042 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2043 deps = self.rqdata.runtaskentries[revdep].depends
2044 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002045 taskhash = self.rqdata.runtaskentries[revdep].hash
2046 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002047 for revdep2 in deps:
2048 if revdep2 not in taskdepdata:
2049 additional.append(revdep2)
2050 next = additional
2051
2052 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2053 return taskdepdata
2054
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Executes the setscene ("sceneQueue") variants of tasks.

    Setscene tasks can restore task output from shared state instead of
    running the real task.  This class collapses the full runqueue
    dependency graph into a smaller graph containing only the setscene
    tasks, validates which of them can be satisfied, and then runs them,
    recording covered/notcovered sets for the real run queue to use.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        # Setscene tasks which succeeded (real task can be skipped),
        # failed (real task must run), or were not needed at all.
        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene_tids) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Working maps for the graph collapse below:
        #   sq_revdeps        - mutable copy of each tid's reverse deps
        #   sq_revdeps_new    - accumulated setscene-only reverse deps
        #   sq_revdeps_squash - final squashed graph (setscene tids only)
        sq_revdeps = {}
        sq_revdeps_new = {}
        sq_revdeps_squash = {}
        self.sq_harddeps = {}
        self.stamps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        self.rqdata.init_progress_reporter.next_stage()

        # First process the chains up to the first setscene task.
        endpoints = {}
        for tid in self.rqdata.runtaskentries:
            sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new[tid] = set()
            if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                #bb.warn("Added endpoint %s" % (tid))
                endpoints[tid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Secondly process the chains between setscene tasks.
        for tid in self.rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint 2 %s" % (tid))
            for dep in self.rqdata.runtaskentries[tid].depends:
                if tid in sq_revdeps[dep]:
                    sq_revdeps[dep].remove(tid)
                if dep not in endpoints:
                    endpoints[dep] = set()
                #bb.warn(" Added endpoint 3 %s" % (dep))
                endpoints[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        def process_endpoints(endpoints):
            # Recursively peel "endpoint" tasks (tasks with no remaining
            # reverse deps) off the graph, pushing the set of setscene
            # tasks that depend on them down towards their dependencies.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    # Setscene tasks terminate the propagation chain.
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                    continue
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        self.rqdata.init_progress_reporter.next_stage()

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = {}
        sq_revdeps_new2 = {}
        def process_endpoints2(endpoints):
            # Same peeling as process_endpoints, but the endpoint itself is
            # included in the propagated set so that setscene tasks directly
            # reachable from build targets can be identified below.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for tid in self.rqdata.runtaskentries:
            sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new2[tid] = set()
            if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                endpoints2[tid] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for tid in self.rqdata.runq_setscene_tids:
            if sq_revdeps_new2[tid]:
                self.unskippable.append(tid)

        self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))

        # Copy the collapsed reverse deps into the final squashed graph;
        # by now only setscene tids should carry any entries.
        for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
            if tid in self.rqdata.runq_setscene_tids:
                deps = set()
                for dep in sq_revdeps_new[tid]:
                    deps.add(dep)
                sq_revdeps_squash[tid] = deps
            elif len(sq_revdeps_new[tid]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
            self.rqdata.init_progress_reporter.update(taskcounter)

        self.rqdata.init_progress_reporter.next_stage()

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for tid in self.rqdata.runq_setscene_tids:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
            for (depname, idependtask) in idepends:

                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                if deptid not in self.rqdata.runtaskentries:
                    bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

                # Hard deps: if deptid fails, tid must be skipped too.
                if not deptid in self.sq_harddeps:
                    self.sq_harddeps[deptid] = set()
                self.sq_harddeps[deptid].add(tid)

                sq_revdeps_squash[tid].add(deptid)
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[deptid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Re-add the hard dependency edges into the squashed graph.
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data

        # sq_deps is the forward view of sq_revdeps; sq_revdeps2 is a deep
        # copy that scenequeue_updatecounters() consumes as tasks complete.
        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Tasks with no remaining reverse deps can run immediately.
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        self.outrightfail = []
        if self.rq.hashvalidate:
            # Ask the configured hash validation function which setscene
            # tasks actually have shared state artefacts available.
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                # noexec tasks have nothing to restore; just stamp them.
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_taskname.append(taskname)
                sq_task.append(tid)
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            # NOTE(review): "sq_task" is bound to sq_taskname (task names,
            # not tids) - this matches the hashvalidate call signature used
            # here, but verify against the validation function's docs.
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            # valid_new aliases stamppresent; appends below extend both.
            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
        """
        Propagate completion (or failure) of 'task' through sq_revdeps2,
        marking dependents buildable when all their deps are done.  On
        failure, hard dependents are skipped recursively.
        """
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        # When BB_SETSCENE_ENFORCE is active, a failed setscene task that is
        # not whitelisted means the build cannot proceed: abort the queue.
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        # Successful setscene execution: count it, notify, and propagate.
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        # Setscene failure is not fatal by default: the real task will run.
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        # Mark a setscene task as failed without ever launching it
        # (e.g. no sstate artefact exists).
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        # Treat the task as covered without running it (stamp present etc.).
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # Skip setscene tasks whose covering dependents are all
                    # already covered and which are not build targets.
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            # Under --force, targets must run for real, not from setscene.
            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            # Dispatch the task to the (fakeroot) worker over its stdin pipe.
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # All setscene work done: publish results and hand over to the
        # normal run queue.
        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        # Delegate directly to the base implementation.
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
        """
        Build the dependency data sent to the worker for a setscene task.
        Only setscene inter-task (idepends) dependencies are followed here,
        not the full runqueue dependency graph.
        """
        def getsetscenedeps(tid):
            # Resolve the _setscene idepends of tid to concrete tids.
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        # Breadth-first walk over the setscene dependency graph.
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
2490
class TaskFailure(Exception):
    """Raised when a task in a runqueue fails.

    The payload *x* is stored directly as the exception's ``args``.
    """

    def __init__(self, x):
        # Assign args directly rather than routing through
        # Exception.__init__, keeping the payload exactly as supplied.
        self.args = x
2497
2498
class runQueueExitWait(bb.event.Event):
    """Fired while the runqueue is waiting for task processes to exit."""

    def __init__(self, remain):
        # Human-readable progress message for the UI.
        self.message = "Waiting for %s active tasks to finish" % remain
        # Number of task processes still running.
        self.remain = remain
        bb.event.Event.__init__(self)
2508
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        # 'task' is a task identifier (tid) string.
        self.taskid = task
        # Display string for the task; subclasses may override this.
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the stats so later queue activity does not mutate
        # what this event reports.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2521
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Rewrite the fields set by the base class so they refer to the
        # setscene variant of the task.
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002532
class runQueueTaskStarted(runQueueEvent):
    """Signals that a runqueue task has been dispatched for execution."""

    def __init__(self, task, stats, rq, noexec=False):
        # 'noexec' marks tasks which are stamped without actually running.
        self.noexec = noexec
        runQueueEvent.__init__(self, task, stats, rq)
2540
class sceneQueueTaskStarted(sceneQueueEvent):
    """Signals that a setscene task has been dispatched for execution."""

    def __init__(self, task, stats, rq, noexec=False):
        # 'noexec' marks tasks which are stamped without actually running.
        self.noexec = noexec
        sceneQueueEvent.__init__(self, task, stats, rq)
2548
class runQueueTaskFailed(runQueueEvent):
    """Signals that a runqueue task has failed."""

    def __init__(self, task, stats, exitcode, rq):
        # Record the failing exit status alongside the common task fields.
        self.exitcode = exitcode
        runQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        details = (self.taskstring, self.exitcode)
        return "Task (%s) failed with exit code '%s'" % details
2559
class sceneQueueTaskFailed(sceneQueueEvent):
    """Signals that a setscene task has failed (real task will run)."""

    def __init__(self, task, stats, exitcode, rq):
        # Record the failing exit status alongside the common task fields.
        self.exitcode = exitcode
        sceneQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        details = (self.taskstring, self.exitcode)
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % details
2570
class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        # No single associated task, so the sceneQueueEvent/runQueueEvent
        # initialisers are deliberately bypassed: only a stats snapshot
        # and the bare event setup are performed.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2578
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    (no extra payload beyond the fields set by runQueueEvent)
    """
2583
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    (no extra payload beyond the fields set by sceneQueueEvent)
    """
2588
class runQueueTaskSkipped(runQueueEvent):
    """Signals that a runqueue task was skipped rather than executed."""

    def __init__(self, task, stats, rq, reason):
        # Short description/code explaining why the task was skipped.
        self.reason = reason
        runQueueEvent.__init__(self, task, stats, rq)
2596
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    Worker processes stream pickled payloads framed as
    <event>...</event> and <exitcode>...</exitcode>; read() accumulates
    bytes non-blockingly and dispatches each complete frame.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        # The write end belongs to the worker; close our copy of it.
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Buffer of bytes received so far but not yet parsed into frames.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Allows the executor to be swapped (e.g. scenequeue -> run queue).
        self.rqexec = rqexec

    def read(self):
        """
        Pull available bytes from the pipe and dispatch any complete
        frames.  Returns True if new data arrived.
        """
        # First check that no worker process has died underneath us.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            # Non-blocking read; EAGAIN simply means no data right now.
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep draining frames until a pass finds none.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # 7 == len(b"<event>"): payload sits between the tags.
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                # 8 == len(b"</event>"): drop the consumed frame.
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    # 10 == len(b"<exitcode>").
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # 11 == len(b"</exitcode>").
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain any remaining frames before closing the read end.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002660
def get_setscene_enforce_whitelist(d):
    """
    Return the list of 'pn:taskname' entries allowed to execute for real
    when BB_SETSCENE_ENFORCE is active, or None when enforcement is off.

    Entries of the form '%:taskname' are expanded against each non-option
    target on the command line (sys.argv), using the target's recipe name.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    # Iterate the list directly; it is never mutated, so the defensive
    # whitelist[:] copy the original made was unnecessary.
    for item in whitelist:
        if item.startswith('%:'):
            # Expand '%' to every command line target's recipe name.
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist
2674
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True when pn:taskname is permitted by the setscene enforcement
    whitelist.  A whitelist of None means enforcement is disabled and
    everything is allowed; patterns use fnmatch-style wildcards.
    """
    import fnmatch

    # No whitelist at all: enforcement is off, allow everything.
    if whitelist is None:
        return True
    candidate = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(candidate, pattern) for pattern in whitelist)