blob: f2e52cf758c83aef278843097fb8160c96493ba5 [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001#!/usr/bin/env python
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4"""
5BitBake 'RunQueue' implementation
6
7Handles preparation and execution of a queue of tasks
8"""
9
10# Copyright (C) 2006-2007 Richard Purdie
11#
12# This program is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 2 as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License along
22# with this program; if not, write to the Free Software Foundation, Inc.,
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25import copy
26import os
27import sys
28import signal
29import stat
30import fcntl
31import errno
32import logging
33import re
34import bb
35from bb import msg, data, event
36from bb import monitordisk
37import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060038import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050039from multiprocessing import Process
Patrick Williamsc124f4f2015-09-15 14:41:29 -050040
# Module-level loggers: "BitBake" is the root BitBake logger, with a
# RunQueue-specific child used throughout this file.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a bare 32-hex-digit (md5-length) hash, case-insensitive, that is
# not embedded inside a longer alphanumeric run.
__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
45
def fn_from_tid(tid):
    """Return the filename part of a task id (everything before the final ':')."""
    parts = tid.rsplit(":", 1)
    return parts[0]
48
def taskname_from_tid(tid):
    """Return the task name part of a task id (text after the final ':')."""
    parts = tid.rsplit(":", 1)
    return parts[1]
51
def split_tid(tid):
    """
    Split a task id into (mc, fn, taskname), discarding the trailing mcfn
    element that split_tid_mcfn() also returns.
    """
    return split_tid_mcfn(tid)[:3]
55
def split_tid_mcfn(tid):
    """
    Decompose a task id into its (mc, fn, taskname, mcfn) components.

    Multiconfig ids have the form "multiconfig:<mc>:<fn>:<taskname>";
    plain ids are "<fn>:<taskname>" with an empty mc and mcfn == fn.
    """
    if tid.startswith('multiconfig:'):
        fields = tid.split(':')
        mc = fields[1]
        taskname = fields[-1]
        # fn may itself contain ':' so rejoin everything between mc and taskname
        fn = ":".join(fields[2:-1])
        return (mc, fn, taskname, "multiconfig:" + mc + ":" + fn)
    parts = tid.rsplit(":", 1)
    return ("", parts[0], parts[1], parts[0])
71
def build_tid(mc, fn, taskname):
    """
    Assemble a task id from its components; inverse of split_tid_mcfn().
    An empty mc yields a plain "<fn>:<taskname>" id.
    """
    if not mc:
        return fn + ":" + taskname
    return ":".join(("multiconfig", mc, fn, taskname))
76
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow duplicate carrying the current counter values."""
        dup = self.__class__(self.total)
        dup.__dict__.update(self.__dict__)
        return dup

    def taskFailed(self):
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number = 1):
        self.active -= number
        self.completed += number

    def taskSkipped(self, number = 1):
        # Skipped tasks are counted as becoming active; a matching
        # taskCompleted() call later rebalances the active counter.
        self.active += number
        self.skipped += number

    def taskActive(self):
        self.active += 1
107
# These values indicate the next step due to be run in the
# runQueue state machine.
# NOTE(review): the state machine itself is outside this chunk; presumably
# RunQueue.state is compared against these constants — verify in execute().
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: materialize the keys as a flat list. The previous code built
        # [dict_keys(...)] -- a one-element list containing the keys view --
        # which made prio_map.index(tid) raise ValueError for every tid and
        # broke list reordering in the subclass schedulers.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        # tid -> stamp file; used to avoid running two tasks which would
        # write the same stamp file concurrently.
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Built lazily on first use in next_buildable_task()
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop anything that has started running since the last call
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None
        if len(self.buildable) == 1:
            # Fast path: a single candidate; run it unless its stamp file is
            # currently being written by another task.
            tid = self.buildable[0]
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            # Invert prio_map once: tid -> priority index (lower runs
            # earlier). Built with enumerate() rather than repeated
            # list.index() calls (O(n) instead of O(n^2)).
            self.rev_prio_map = {tid: idx for idx, tid in enumerate(self.prio_map)}

        # Pick the buildable task with the lowest priority index whose stamp
        # is not already being written.
        best = None
        bestprio = None
        for tid in self.buildable:
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        # Only hand out work while below the configured parallelism limit;
        # implicitly returns None otherwise.
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Record that 'task' has become buildable."""
        self.buildable.append(task)

    def describe_task(self, taskid):
        """Return a human-readable description of a task id (with its priority, if known)."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Log the current priority map (most important first) at debug level 3."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
198
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket task ids by weight, preserving encounter order inside
        # each bucket.
        by_weight = {}
        for tid in self.rqdata.runtaskentries:
            w = self.rqdata.runtaskentries[tid].weight
            by_weight.setdefault(w, []).append(tid)

        # Flatten in ascending weight order, then reverse so the heaviest
        # tasks end up first in the priority map.
        self.prio_map = []
        for w in sorted(by_weight):
            self.prio_map.extend(by_weight[w])

        self.prio_map.reverse()
225
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            # Move every task of this kind (in existing priority order) up
            # to the front portion of the map, directly after the kinds
            # already placed.
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500322
class RunTaskEntry(object):
    """
    Per-task record in the run queue: dependency links plus hash and
    weight bookkeeping (populated by RunQueueData as the queue is built).
    """
    def __init__(self):
        # Task ids this task depends on, and the reverse links
        self.depends = set()
        self.revdeps = set()
        # Signature hash; None until computed (read via get_task_hash())
        self.hash = None
        self.task = None
        # Scheduling weight; heavier tasks run earlier (see calculate_task_weights())
        self.weight = 1
330
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500331class RunQueueData:
332 """
333 BitBake Run Queue implementation
334 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Capture the inputs needed to build the run queue: the owning rq,
        the cooker, per-multiconfig data caches, taskData and the requested
        targets. Whitelist configuration is snapshotted from cfgData up front.
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        # NOTE(review): presumably flipped when multiple .bb files provide
        # the same target — the code that sets it is outside this chunk.
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # Dummy (no-op) progress reporter by default
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
351
    def reset(self):
        # Discard any previously computed task entries so prepare() can
        # rebuild them from scratch
        self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500354
355 def runq_depends_names(self, ids):
356 import re
357 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600358 for id in ids:
359 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500360 nam = re.sub("_[^,]*,", ",", nam)
361 ret.extend([nam])
362 return ret
363
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600364 def get_task_hash(self, tid):
365 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500366
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600367 def get_user_idstring(self, tid, task_name_suffix = ""):
368 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500369
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500370 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500371 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
372 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600373 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500374 return "%s:%s" % (pn, taskname)
375
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message fragments describing up to 10 distinct loops.
        """
        # NOTE(review): this import is unused — the code below calls
        # copy.deepcopy via the module-level `copy` import.
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            (canonical form, so rotations of the same loop compare equal).
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies; a revdep already in
            # prev_chain means we have closed a loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    # Never visited this revdep before
                    scan = True
                elif revdep in explored_deps[revdep]:
                    # revdep is part of a loop onto itself
                    scan = True
                else:
                    # Re-scan if any task in the current chain appears among
                    # revdep's previously recorded dependencies
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    # Recurse with a copy so sibling branches keep their own chain
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
462
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints: task ids with no reverse dependencies (the chain ends);
        weights are propagated from these back up the dependency graph.
        Returns the tid -> weight dict; aborts via bb.msg.fatal() if
        unbuildable tasks remain.
        """

        # NOTE(review): numTasks is computed but unused in this method
        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Endpoints seed the propagation with a fixed weight of 10
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Breadth-first sweep from the endpoints towards the roots: each
        # task accumulates the weight of everything that depends on it, and
        # becomes a new frontier point once all of its revdeps are counted.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check: any task never reached (or with
        # revdeps still uncounted) is part of a cycle.
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
522
523 def prepare(self):
524 """
525 Turn a set of taskData into a RunQueue and compute data needed
526 to optimise the execution order.
527 """
528
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600529 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500530 recursivetasks = {}
531 recursiveitasks = {}
532 recursivetasksselfref = set()
533
534 taskData = self.taskData
535
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600536 found = False
537 for mc in self.taskData:
538 if len(taskData[mc].taskentries) > 0:
539 found = True
540 break
541 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500542 # Nothing to do
543 return 0
544
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600545 self.init_progress_reporter.start()
546 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500547
548 # Step A - Work out a list of tasks to run
549 #
550 # Taskdata gives us a list of possible providers for every build and run
551 # target ordered by priority. It also gives information on each of those
552 # providers.
553 #
554 # To create the actual list of tasks to execute we fix the list of
555 # providers and then resolve the dependencies into task IDs. This
556 # process is repeated for each type of dependency (tdepends, deptask,
557 # rdeptast, recrdeptask, idepends).
558
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600559 def add_build_dependencies(depids, tasknames, depends, mc):
560 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500561 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600562 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500563 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600564 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500565 if depdata is None:
566 continue
567 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 t = depdata + ":" + taskname
569 if t in taskData[mc].taskentries:
570 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500571
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600572 def add_runtime_dependencies(depids, tasknames, depends, mc):
573 for depname in depids:
574 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500575 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600576 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500577 if depdata is None:
578 continue
579 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600580 t = depdata + ":" + taskname
581 if t in taskData[mc].taskentries:
582 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500583
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600584 for mc in taskData:
585 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500586
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600587 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
588 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500589
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600590 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
591
592 depends = set()
593 task_deps = self.dataCaches[mc].task_deps[taskfn]
594
595 self.runtaskentries[tid] = RunTaskEntry()
596
597 if fn in taskData[mc].failed_fns:
598 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500599
600 # Resolve task internal dependencies
601 #
602 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600603 for t in taskData[mc].taskentries[tid].tdepends:
604 (_, depfn, deptaskname, _) = split_tid_mcfn(t)
605 depends.add(build_tid(mc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500606
607 # Resolve 'deptask' dependencies
608 #
609 # e.g. do_sometask[deptask] = "do_someothertask"
610 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600611 if 'deptask' in task_deps and taskname in task_deps['deptask']:
612 tasknames = task_deps['deptask'][taskname].split()
613 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614
615 # Resolve 'rdeptask' dependencies
616 #
617 # e.g. do_sometask[rdeptask] = "do_someothertask"
618 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600619 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
620 tasknames = task_deps['rdeptask'][taskname].split()
621 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500622
623 # Resolve inter-task dependencies
624 #
625 # e.g. do_sometask[depends] = "targetname:do_someothertask"
626 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600627 idepends = taskData[mc].taskentries[tid].idepends
628 for (depname, idependtask) in idepends:
629 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500630 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600631 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500632 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600633 t = depdata + ":" + idependtask
634 depends.add(t)
635 if t not in taskData[mc].taskentries:
636 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
637 irdepends = taskData[mc].taskentries[tid].irdepends
638 for (depname, idependtask) in irdepends:
639 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500640 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500641 if not taskData[mc].run_targets[depname]:
642 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600643 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500644 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600645 t = depdata + ":" + idependtask
646 depends.add(t)
647 if t not in taskData[mc].taskentries:
648 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649
650 # Resolve recursive 'recrdeptask' dependencies (Part A)
651 #
652 # e.g. do_sometask[recrdeptask] = "do_someothertask"
653 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
654 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600655 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
656 tasknames = task_deps['recrdeptask'][taskname].split()
657 recursivetasks[tid] = tasknames
658 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
659 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
660 if taskname in tasknames:
661 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500662
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600663 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
664 recursiveitasks[tid] = []
665 for t in task_deps['recideptask'][taskname].split():
666 newdep = build_tid(mc, fn, t)
667 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600669 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400670 # Remove all self references
671 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500672
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600673 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500674
Brad Bishop316dfdd2018-06-25 12:45:53 -0400675 self.init_progress_reporter.next_stage()
676
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500677 # Resolve recursive 'recrdeptask' dependencies (Part B)
678 #
679 # e.g. do_sometask[recrdeptask] = "do_someothertask"
680 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600681 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600682
Brad Bishop316dfdd2018-06-25 12:45:53 -0400683 # Generating/interating recursive lists of dependencies is painful and potentially slow
684 # Precompute recursive task dependencies here by:
685 # a) create a temp list of reverse dependencies (revdeps)
686 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
687 # c) combine the total list of dependencies in cumulativedeps
688 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500689
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500690
Brad Bishop316dfdd2018-06-25 12:45:53 -0400691 revdeps = {}
692 deps = {}
693 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600694 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400695 deps[tid] = set(self.runtaskentries[tid].depends)
696 revdeps[tid] = set()
697 cumulativedeps[tid] = set()
698 # Generate a temp list of reverse dependencies
699 for tid in self.runtaskentries:
700 for dep in self.runtaskentries[tid].depends:
701 revdeps[dep].add(tid)
702 # Find the dependency chain endpoints
703 endpoints = set()
704 for tid in self.runtaskentries:
705 if len(deps[tid]) == 0:
706 endpoints.add(tid)
707 # Iterate the chains collating dependencies
708 while endpoints:
709 next = set()
710 for tid in endpoints:
711 for dep in revdeps[tid]:
712 cumulativedeps[dep].add(fn_from_tid(tid))
713 cumulativedeps[dep].update(cumulativedeps[tid])
714 if tid in deps[dep]:
715 deps[dep].remove(tid)
716 if len(deps[dep]) == 0:
717 next.add(dep)
718 endpoints = next
719 #for tid in deps:
720 # if len(deps[tid]) != 0:
721 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
722
723 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
724 # resolve these recursively until we aren't adding any further extra dependencies
725 extradeps = True
726 while extradeps:
727 extradeps = 0
728 for tid in recursivetasks:
729 tasknames = recursivetasks[tid]
730
731 totaldeps = set(self.runtaskentries[tid].depends)
732 if tid in recursiveitasks:
733 totaldeps.update(recursiveitasks[tid])
734 for dep in recursiveitasks[tid]:
735 if dep not in self.runtaskentries:
736 continue
737 totaldeps.update(self.runtaskentries[dep].depends)
738
739 deps = set()
740 for dep in totaldeps:
741 if dep in cumulativedeps:
742 deps.update(cumulativedeps[dep])
743
744 for t in deps:
745 for taskname in tasknames:
746 newtid = t + ":" + taskname
747 if newtid == tid:
748 continue
749 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
750 extradeps += 1
751 self.runtaskentries[tid].depends.add(newtid)
752
753 # Handle recursive tasks which depend upon other recursive tasks
754 deps = set()
755 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
756 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
757 for newtid in deps:
758 for taskname in tasknames:
759 if not newtid.endswith(":" + taskname):
760 continue
761 if newtid in self.runtaskentries:
762 extradeps += 1
763 self.runtaskentries[tid].depends.add(newtid)
764
765 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
766
767 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
768 for tid in recursivetasksselfref:
769 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600770
771 self.init_progress_reporter.next_stage()
772
773 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500774
775 # Step B - Mark all active tasks
776 #
777 # Start with the tasks we were asked to run and mark all dependencies
778 # as active too. If the task is to be 'forced', clear its stamp. Once
779 # all active tasks are marked, prune the ones we don't need.
780
781 logger.verbose("Marking Active Tasks")
782
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600783 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500784 """
785 Mark an item as active along with its depends
786 (calls itself recursively)
787 """
788
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600789 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500790 return
791
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600792 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500793
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600794 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500795 for depend in depends:
796 mark_active(depend, depth+1)
797
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600798 self.target_tids = []
799 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500800
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600801 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500802 continue
803
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600804 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500805 continue
806
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500807 parents = False
808 if task.endswith('-'):
809 parents = True
810 task = task[:-1]
811
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600812 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500813 continue
814
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600815 # fn already has mc prefix
816 tid = fn + ":" + task
817 self.target_tids.append(tid)
818 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500819 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600820 tasks = []
821 for x in taskData[mc].taskentries:
822 if x.startswith(fn + ":"):
823 tasks.append(taskname_from_tid(x))
824 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500825 if close_matches:
826 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
827 else:
828 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600829 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
830
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500831 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500832 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600833 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500834 mark_active(i, 1)
835 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600836 mark_active(tid, 1)
837
838 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500839
840 # Step C - Prune all inactive tasks
841 #
842 # Once all active tasks are marked, prune the ones we don't need.
843
Brad Bishop316dfdd2018-06-25 12:45:53 -0400844 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600845 for tid in list(self.runtaskentries.keys()):
846 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400847 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600848 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600849
Brad Bishop316dfdd2018-06-25 12:45:53 -0400850 # Handle --runall
851 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500852 # re-run the mark_active and then drop unused tasks from new list
853 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400854
855 for task in self.cooker.configuration.runall:
856 runall_tids = set()
857 for tid in list(self.runtaskentries):
858 wanttid = fn_from_tid(tid) + ":do_%s" % task
859 if wanttid in delcount:
860 self.runtaskentries[wanttid] = delcount[wanttid]
861 if wanttid in self.runtaskentries:
862 runall_tids.add(wanttid)
863
864 for tid in list(runall_tids):
865 mark_active(tid,1)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500866
867 for tid in list(self.runtaskentries.keys()):
868 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400869 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500870 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500871
872 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400873 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
874
875 self.init_progress_reporter.next_stage()
876
877 # Handle runonly
878 if self.cooker.configuration.runonly:
879 # re-run the mark_active and then drop unused tasks from new list
880 runq_build = {}
881
882 for task in self.cooker.configuration.runonly:
883 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
884
885 for tid in list(runonly_tids):
886 mark_active(tid,1)
887
888 for tid in list(self.runtaskentries.keys()):
889 if tid not in runq_build:
890 delcount[tid] = self.runtaskentries[tid]
891 del self.runtaskentries[tid]
892
893 if len(self.runtaskentries) == 0:
894 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500895
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500896 #
897 # Step D - Sanity checks and computation
898 #
899
900 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600901 if len(self.runtaskentries) == 0:
902 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500903 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
904 else:
905 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
906
Brad Bishop316dfdd2018-06-25 12:45:53 -0400907 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500908
909 logger.verbose("Assign Weightings")
910
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600911 self.init_progress_reporter.next_stage()
912
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500913 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600914 for tid in self.runtaskentries:
915 for dep in self.runtaskentries[tid].depends:
916 self.runtaskentries[dep].revdeps.add(tid)
917
918 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500919
920 # Identify tasks at the end of dependency chains
921 # Error on circular dependency loops (length two)
922 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600923 for tid in self.runtaskentries:
924 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500925 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600926 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500927 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600928 if dep in self.runtaskentries[tid].depends:
929 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
930
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500931
932 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
933
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600934 self.init_progress_reporter.next_stage()
935
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500936 # Calculate task weights
937 # Check of higher length circular dependencies
938 self.runq_weight = self.calculate_task_weights(endpoints)
939
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600940 self.init_progress_reporter.next_stage()
941
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500942 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600943 for mc in self.dataCaches:
944 prov_list = {}
945 seen_fn = []
946 for tid in self.runtaskentries:
947 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
948 if taskfn in seen_fn:
949 continue
950 if mc != tidmc:
951 continue
952 seen_fn.append(taskfn)
953 for prov in self.dataCaches[mc].fn_provides[taskfn]:
954 if prov not in prov_list:
955 prov_list[prov] = [taskfn]
956 elif taskfn not in prov_list[prov]:
957 prov_list[prov].append(taskfn)
958 for prov in prov_list:
959 if len(prov_list[prov]) < 2:
960 continue
961 if prov in self.multi_provider_whitelist:
962 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500963 seen_pn = []
964 # If two versions of the same PN are being built its fatal, we don't support it.
965 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600966 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500967 if pn not in seen_pn:
968 seen_pn.append(pn)
969 else:
970 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500971 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
972 #
973 # Construct a list of things which uniquely depend on each provider
974 # since this may help the user figure out which dependency is triggering this warning
975 #
976 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
977 deplist = {}
978 commondeps = None
979 for provfn in prov_list[prov]:
980 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600981 for tid in self.runtaskentries:
982 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500983 if fn != provfn:
984 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600985 for dep in self.runtaskentries[tid].revdeps:
986 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500987 if fn == provfn:
988 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600989 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500990 if not commondeps:
991 commondeps = set(deps)
992 else:
993 commondeps &= deps
994 deplist[provfn] = deps
995 for provfn in deplist:
996 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
997 #
998 # Construct a list of provides and runtime providers for each recipe
999 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1000 #
1001 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1002 provide_results = {}
1003 rprovide_results = {}
1004 commonprovs = None
1005 commonrprovs = None
1006 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001007 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001008 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001009 for rprovide in self.dataCaches[mc].rproviders:
1010 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001011 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001012 for package in self.dataCaches[mc].packages:
1013 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001014 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001015 for package in self.dataCaches[mc].packages_dynamic:
1016 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001017 rprovides.add(package)
1018 if not commonprovs:
1019 commonprovs = set(provides)
1020 else:
1021 commonprovs &= provides
1022 provide_results[provfn] = provides
1023 if not commonrprovs:
1024 commonrprovs = set(rprovides)
1025 else:
1026 commonrprovs &= rprovides
1027 rprovide_results[provfn] = rprovides
1028 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1029 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1030 for provfn in prov_list[prov]:
1031 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1032 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1033
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001034 if self.warn_multi_bb:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001035 logger.warning(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001036 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001037 logger.error(msg)
1038
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001039 self.init_progress_reporter.next_stage()
1040
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001041 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001042 self.stampfnwhitelist = {}
1043 for mc in self.taskData:
1044 self.stampfnwhitelist[mc] = []
1045 for entry in self.stampwhitelist.split():
1046 if entry not in self.taskData[mc].build_targets:
1047 continue
1048 fn = self.taskData.build_targets[entry][0]
1049 self.stampfnwhitelist[mc].append(fn)
1050
1051 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001052
1053 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001054 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001055 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001056 for tid in self.runtaskentries:
1057 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001058 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001059 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001060 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001062
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001063 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001064 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1065 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001066 if fn + ":" + taskname not in taskData[mc].taskentries:
1067 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001068 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1069 if error_nostamp:
1070 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1071 else:
1072 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1073 else:
1074 logger.verbose("Invalidate task %s, %s", taskname, fn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001075 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
1076
1077 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001078
1079 # Invalidate task if force mode active
1080 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001081 for tid in self.target_tids:
1082 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001083
1084 # Invalidate task if invalidate mode active
1085 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001086 for tid in self.target_tids:
1087 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001088 for st in self.cooker.configuration.invalidate_stamp.split(','):
1089 if not st.startswith("do_"):
1090 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001091 invalidate_task(fn + ":" + st, True)
1092
1093 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001094
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001095 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001096 for mc in taskData:
1097 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1098 virtpnmap = {}
1099 for v in virtmap:
1100 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1101 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1102 if hasattr(bb.parse.siggen, "tasks_resolved"):
1103 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1104
1105 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001106
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001107 # Iterate over the task list and call into the siggen code
1108 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001109 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001110 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001111 for tid in todeal.copy():
1112 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1113 dealtwith.add(tid)
1114 todeal.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001115 procdep = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001116 for dep in self.runtaskentries[tid].depends:
1117 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
1118 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1119 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
1120 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001121
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001122 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001123
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001124 #self.dump_data()
1125 return len(self.runtaskentries)
1126
1127 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001128 """
1129 Dump some debug information on the internal data structures
1130 """
1131 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001132 for tid in self.runtaskentries:
1133 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1134 self.runtaskentries[tid].weight,
1135 self.runtaskentries[tid].depends,
1136 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137
class RunQueueWorker():
    """Container pairing a bitbake-worker subprocess with its status pipe."""
    def __init__(self, process, pipe):
        # subprocess.Popen handle for the worker, and the runQueuePipe used
        # to read events/results back from it.
        self.pipe = pipe
        self.process = process
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142
1143class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001144 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001145
1146 self.cooker = cooker
1147 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001148 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001149
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001150 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1151 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
1152 self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
1153 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001154
1155 self.state = runQueuePrepare
1156
1157 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001158 # Invoked at regular time intervals via the bitbake heartbeat event
1159 # while the build is running. We generate a unique name for the handler
1160 # here, just in case that there ever is more than one RunQueue instance,
1161 # start the handler when reaching runQueueSceneRun, and stop it when
1162 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001163 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001164 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1165 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001166 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001167 self.worker = {}
1168 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001169
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001170 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001171 logger.debug(1, "Starting bitbake-worker")
1172 magic = "decafbad"
1173 if self.cooker.configuration.profile:
1174 magic = "decafbadbad"
1175 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001176 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001177 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001178 fakerootcmd = mcdata.getVar("FAKEROOTCMD")
1179 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001180 env = os.environ.copy()
1181 for key, value in (var.split('=') for var in fakerootenv):
1182 env[key] = value
1183 worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
1184 else:
1185 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1186 bb.utils.nonblockingfd(worker.stdout)
1187 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1188
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001189 runqhash = {}
1190 for tid in self.rqdata.runtaskentries:
1191 runqhash[tid] = self.rqdata.runtaskentries[tid].hash
1192
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001193 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001194 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1195 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1196 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1197 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001198 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001199 "runq_hash" : runqhash,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001200 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1201 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1202 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1203 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1204 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001205 "buildname" : self.cfgData.getVar("BUILDNAME"),
1206 "date" : self.cfgData.getVar("DATE"),
1207 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001208 }
1209
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001210 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001211 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001212 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001213 worker.stdin.flush()
1214
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001215 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001216
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001217 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218 if not worker:
1219 return
1220 logger.debug(1, "Teardown for bitbake-worker")
1221 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001222 worker.process.stdin.write(b"<quit></quit>")
1223 worker.process.stdin.flush()
1224 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001225 except IOError:
1226 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001227 while worker.process.returncode is None:
1228 worker.pipe.read()
1229 worker.process.poll()
1230 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001231 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001232 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001233
1234 def start_worker(self):
1235 if self.worker:
1236 self.teardown_workers()
1237 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001238 for mc in self.rqdata.dataCaches:
1239 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001240
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001241 def start_fakeworker(self, rqexec, mc):
1242 if not mc in self.fakeworker:
1243 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001244
1245 def teardown_workers(self):
1246 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001247 for mc in self.worker:
1248 self._teardown_worker(self.worker[mc])
1249 self.worker = {}
1250 for mc in self.fakeworker:
1251 self._teardown_worker(self.fakeworker[mc])
1252 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001253
1254 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001255 for mc in self.worker:
1256 self.worker[mc].pipe.read()
1257 for mc in self.fakeworker:
1258 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001259
1260 def active_fds(self):
1261 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001262 for mc in self.worker:
1263 fds.append(self.worker[mc].pipe.input)
1264 for mc in self.fakeworker:
1265 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001266 return fds
1267
    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """
        Return True if the stamp for task 'tid' is current, i.e. the task does
        not need to be re-run based on stamp file timestamps.

        Arguments:
            tid      -- task identifier; its fn/taskname are derived via
                        split_tid_mcfn()
            taskname -- override the task name derived from tid (used e.g. to
                        check a different task's stamp for the same recipe)
            recurse  -- also validate the stamps of all dependencies,
                        recursively
            cache    -- dict of already-computed results, shared across the
                        recursion to avoid re-checking the same task

        A task is NOT current if its stamp file is missing, or it is marked
        'nostamp', or (subject to the stamp policy) a dependency's stamp is
        missing or newer than this task's stamp.
        """
        def get_timestamp(f):
            # Return the mtime of f, or None if the file is absent/unreadable.
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        # Stamp policy: "perfile" only compares stamps within the same recipe;
        # any other policy compares across the full dependency tree, with
        # "whitelist" exempting the whitelisted recipe files from comparison.
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene tasks (other than do_setscene itself) are current if their
        # stamp exists at all; no timestamp comparison is performed.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A dependency satisfied from setscene (stampfile3 present and
                # at least as new as the real stamp) is skipped entirely.
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                # Only compare timestamps within the same recipe, unless the
                # policy asked for the full dependency tree (minus whitelist).
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                if recurse and iscurrent:
                    # Consult/populate the shared cache so each dependency is
                    # validated at most once across the whole recursion.
                    if dep in cache:
                        iscurrent = cache[dep]
                        if not iscurrent:
                            logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                    else:
                        iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                        cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent
1340
1341 def _execute_runqueue(self):
1342 """
1343 Run the tasks in a queue prepared by rqdata.prepare()
1344 Upon failure, optionally try to recover the build using any alternate providers
1345 (if the abort on failure configuration option isn't set)
1346 """
1347
1348 retval = True
1349
1350 if self.state is runQueuePrepare:
1351 self.rqexe = RunQueueExecuteDummy(self)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001352 # NOTE: if you add, remove or significantly refactor the stages of this
1353 # process then you should recalculate the weightings here. This is quite
1354 # easy to do - just change the next line temporarily to pass debug=True as
1355 # the last parameter and you'll get a printout of the weightings as well
1356 # as a map to the lines where next_stage() was called. Of course this isn't
1357 # critical, but it helps to keep the progress reporting accurate.
1358 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1359 "Initialising tasks",
1360 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001361 if self.rqdata.prepare() == 0:
1362 self.state = runQueueComplete
1363 else:
1364 self.state = runQueueSceneInit
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001365 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001366
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001367 # we are ready to run, emit dependency info to any UI or class which
1368 # needs it
1369 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1370 self.rqdata.init_progress_reporter.next_stage()
1371 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001372
1373 if self.state is runQueueSceneInit:
1374 dump = self.cooker.configuration.dump_signatures
1375 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001376 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001377 if 'printdiff' in dump:
1378 invalidtasks = self.print_diffscenetasks()
1379 self.dump_signatures(dump)
1380 if 'printdiff' in dump:
1381 self.write_diffscenetasks(invalidtasks)
1382 self.state = runQueueComplete
1383 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001384 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001385 self.start_worker()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001386 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001387 self.rqexe = RunQueueExecuteScenequeue(self)
1388
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001389 if self.state is runQueueSceneRun:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001390 if not self.dm_event_handler_registered:
1391 res = bb.event.register(self.dm_event_handler_name,
1392 lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
1393 ('bb.event.HeartbeatEvent',))
1394 self.dm_event_handler_registered = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001395 retval = self.rqexe.execute()
1396
1397 if self.state is runQueueRunInit:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001398 if self.cooker.configuration.setsceneonly:
1399 self.state = runQueueComplete
1400 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001401 # Just in case we didn't setscene
1402 self.rqdata.init_progress_reporter.finish()
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001403 logger.info("Executing RunQueue Tasks")
1404 self.rqexe = RunQueueExecuteTasks(self)
1405 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001406
1407 if self.state is runQueueRunning:
1408 retval = self.rqexe.execute()
1409
1410 if self.state is runQueueCleanUp:
1411 retval = self.rqexe.finish()
1412
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001413 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1414
1415 if build_done and self.dm_event_handler_registered:
1416 bb.event.remove(self.dm_event_handler_name, None)
1417 self.dm_event_handler_registered = False
1418
1419 if build_done and self.rqexe:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001420 self.teardown_workers()
1421 if self.rqexe.stats.failed:
1422 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1423 else:
1424 # Let's avoid the word "failed" if nothing actually did
1425 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
1426
1427 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001428 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001429
1430 if self.state is runQueueComplete:
1431 # All done
1432 return False
1433
1434 # Loop
1435 return retval
1436
1437 def execute_runqueue(self):
1438 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1439 try:
1440 return self._execute_runqueue()
1441 except bb.runqueue.TaskFailure:
1442 raise
1443 except SystemExit:
1444 raise
1445 except bb.BBHandledException:
1446 try:
1447 self.teardown_workers()
1448 except:
1449 pass
1450 self.state = runQueueComplete
1451 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001452 except Exception as err:
1453 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001454 try:
1455 self.teardown_workers()
1456 except:
1457 pass
1458 self.state = runQueueComplete
1459 raise
1460
1461 def finish_runqueue(self, now = False):
1462 if not self.rqexe:
1463 self.state = runQueueComplete
1464 return
1465
1466 if now:
1467 self.rqexe.finish_now()
1468 else:
1469 self.rqexe.finish()
1470
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001471 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001472 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001473 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1474 siggen = bb.parse.siggen
1475 dataCaches = self.rqdata.dataCaches
1476 siggen.dump_sigfn(fn, dataCaches, options)
1477
1478 def dump_signatures(self, options):
1479 fns = set()
1480 bb.note("Reparsing files to collect dependency data")
1481
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001482 for tid in self.rqdata.runtaskentries:
1483 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001484 fns.add(fn)
1485
1486 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1487 # We cannot use the real multiprocessing.Pool easily due to some local data
1488 # that can't be pickled. This is a cheap multi-process solution.
1489 launched = []
1490 while fns:
1491 if len(launched) < max_process:
1492 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1493 p.start()
1494 launched.append(p)
1495 for q in launched:
1496 # The finished processes are joined when calling is_alive()
1497 if not q.is_alive():
1498 launched.remove(q)
1499 for p in launched:
1500 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001501
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001502 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001503
1504 return
1505
1506 def print_diffscenetasks(self):
1507
1508 valid = []
1509 sq_hash = []
1510 sq_hashfn = []
1511 sq_fn = []
1512 sq_taskname = []
1513 sq_task = []
1514 noexec = []
1515 stamppresent = []
1516 valid_new = set()
1517
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001518 for tid in self.rqdata.runtaskentries:
1519 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1520 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001521
1522 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001523 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001524 continue
1525
1526 sq_fn.append(fn)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001527 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001528 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001529 sq_taskname.append(taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001530 sq_task.append(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001531 locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001532 try:
1533 call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
1534 valid = bb.utils.better_eval(call, locs)
1535 # Handle version with no siginfo parameter
1536 except TypeError:
1537 call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
1538 valid = bb.utils.better_eval(call, locs)
1539 for v in valid:
1540 valid_new.add(sq_task[v])
1541
1542 # Tasks which are both setscene and noexec never care about dependencies
1543 # We therefore find tasks which are setscene and noexec and mark their
1544 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001545 for tid in noexec:
1546 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001547 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001548 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001549 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001550 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1551 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001552 continue
1553 hasnoexecparents = False
1554 break
1555 if hasnoexecparents:
1556 valid_new.add(dep)
1557
1558 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001559 for tid in self.rqdata.runtaskentries:
1560 if tid not in valid_new and tid not in noexec:
1561 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001562
1563 found = set()
1564 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001565 for tid in invalidtasks:
1566 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001567 while toprocess:
1568 next = set()
1569 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001570 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001571 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001572 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001573 if dep not in processed:
1574 processed.add(dep)
1575 next.add(dep)
1576 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001577 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001578 toprocess = set()
1579
1580 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001581 for tid in invalidtasks.difference(found):
1582 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001583
1584 if tasklist:
1585 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1586
1587 return invalidtasks.difference(found)
1588
    def write_diffscenetasks(self, invalidtasks):
        """
        For each invalid task, locate its current siginfo file and the
        closest previously-written one, and print a human-readable diff of
        why the cached version couldn't be used.
        """

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Called back by compare_sigfiles() for nested (dependency)
            # signature differences; returns indented diff lines.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            # All siginfo files written for this recipe/task; the one
            # containing our hash in its name is the current signature.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current signature; whatever remains are candidates
            # for "closest previous version".
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                # Pick the most recent remaining siginfo file.
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1623
class RunQueueExecute:
    """
    Base class for runqueue executors.

    Holds the execution state shared by the setscene and real-task
    executors: the buildable/running/complete task-id sets, the failure
    list, stamp bookkeeping and the worker pipe plumbing.
    """

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # BB_NUMBER_THREADS caps concurrently active tasks; BB_SCHEDULER
        # selects the task ordering policy (see RunQueueExecuteTasks.get_schedulers).
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Task ids migrate through these sets as they progress.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamp files of currently running tasks (dict by task, plus a
        # parallel list used for removal checks).
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        self.stampcache = {}

        # Point the worker pipes at this executor so their events reach us.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        """
        Called when a worker reports that a task's process exited with
        the given status; route to task_fail()/task_complete().
        """
        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        """
        Tell every worker (real and fakeroot) to stop immediately, then
        move the runqueue to its final state.
        """
        # Identical shutdown message for both worker pools; previously this
        # loop was duplicated verbatim for worker and fakeworker.
        for workers in (self.rq.worker, self.rq.fakeworker):
            for mc in workers:
                try:
                    workers[mc].process.stdin.write(b"<finishnow></finishnow>")
                    workers[mc].process.stdin.flush()
                except IOError:
                    # worker must have died?
                    pass

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """
        Graceful shutdown: wait for active tasks to drain, then move to the
        failed or complete state. Returns the active fds while draining.
        """
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        """
        Ask the metadata's depvalidate hook whether the given dependencies
        are actually needed. Returns the hook's verdict, or False when no
        hook is configured.

        NOTE: mutates the caller's taskdeps set by adding task to it
        (pre-existing behavior, kept for compatibility).
        """
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid
1718
class RunQueueExecuteDummy(RunQueueExecute):
    # Minimal placeholder executor used while the runqueue is still being
    # prepared. Deliberately does NOT call RunQueueExecute.__init__ -
    # no workers exist yet, so only rq and empty stats are set up.
    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        # Nothing is running, so finishing just flags completion.
        self.rq.state = runQueueComplete
        return
1727
1728class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        """
        Build the executor for real (non-setscene) tasks: work out which
        tasks are covered by setscene results, honor the setsceneverify
        hook, fire stamp updates and pick a scheduler.
        """
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        # Remember what the scenequeue itself covered before we extend it.
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Fixpoint: keep adding tasks whose reverse dependencies are all
        # covered (unless explicitly notcovered) until nothing changes.
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    if tid in self.rq.scenequeue_notcovered:
                        continue
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            # Collect tasks with neither a current setscene stamp nor a
            # current normal stamp; these are handed to the verify hook.
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        def removecoveredtask(tid):
            # Un-cover a task: delete its setscene stamp and drop it from
            # the covered set so it executes normally.
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskname = taskname + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        # Removing a task from the covered set can invalidate the covered
        # status of tasks it depends on; propagate the removals, but never
        # past what the scenequeue itself covered (initial_covered).
        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)


        # Tell any listeners which target stamps are now valid, per
        # multiconfig.
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))

            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Instantiate the scheduler selected by BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1829
1830 def get_schedulers(self):
1831 schedulers = set(obj for obj in globals().values()
1832 if type(obj) is type and
1833 issubclass(obj, RunQueueScheduler))
1834
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001835 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001836 if user_schedulers:
1837 for sched in user_schedulers.split():
1838 if not "." in sched:
1839 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1840 continue
1841
1842 modname, name = sched.rsplit(".", 1)
1843 try:
1844 module = __import__(modname, fromlist=(name,))
1845 except ImportError as exc:
1846 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1847 raise SystemExit(1)
1848 else:
1849 schedulers.add(getattr(module, name))
1850 return schedulers
1851
    def setbuildable(self, task):
        # Record the task as ready to run and notify the scheduler so it
        # can include it in its ordering.
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001855
1856 def task_completeoutright(self, task):
1857 """
1858 Mark a task as completed
1859 Look at the reverse dependencies and mark any task with
1860 completed dependencies as buildable
1861 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001862 self.runq_complete.add(task)
1863 for revdep in self.rqdata.runtaskentries[task].revdeps:
1864 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001865 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001866 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001867 continue
1868 alldeps = 1
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001869 for dep in self.rqdata.runtaskentries[revdep].depends:
1870 if dep not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001871 alldeps = 0
1872 if alldeps == 1:
1873 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001874 fn = fn_from_tid(revdep)
1875 taskname = taskname_from_tid(revdep)
1876 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001877
    def task_complete(self, task):
        # Successful completion: bump stats, broadcast the event, then
        # propagate buildability to reverse dependencies.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1882
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # Unless the abort-on-failure option is off, stop scheduling new
        # work and move to cleanup.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1893
    def task_skip(self, task, reason):
        # Treat a skipped task as instantly run-and-completed: mark it
        # running/buildable, announce the skip, then complete it so its
        # reverse dependencies unblock.
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
1901
    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()

        One scheduling step: dispatch at most one new task per call and
        return a value the outer loop uses to decide whether to keep
        iterating (True, or the active fds while tasks are in flight).
        """

        # One-time setscene enforcement pass: if a whitelist is configured,
        # verify nothing outside it is about to execute for real.
        if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
            self.rqdata.setscenewhitelist_checked = True

            # Check tasks that are going to run against the whitelist
            def check_norun_task(tid, showerror=False):
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                # Ignore covered tasks
                if tid in self.rq.scenequeue_covered:
                    return False
                # Ignore stamped tasks
                if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                    return False
                # Ignore noexec tasks
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    return False

                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                    if showerror:
                        if tid in self.rqdata.runq_setscene_tids:
                            logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
                        else:
                            logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
                    return True
                return False
            # Look to see if any tasks that we think shouldn't run are going to
            unexpected = False
            for tid in self.rqdata.runtaskentries:
                if check_norun_task(tid):
                    unexpected = True
                    break
            if unexpected:
                # Run through the tasks in the rough order they'd have executed and print errors
                # (since the order can be useful - usually missing sstate for the last few tasks
                # is the cause of the problem)
                task = self.sched.next()
                while task is not None:
                    check_norun_task(task, showerror=True)
                    self.task_skip(task, 'Setscene enforcement check')
                    task = self.sched.next()

                self.rq.state = runQueueCleanUp
                return True

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            # Skip tasks already satisfied by setscene or existing stamps.
            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks complete immediately without a worker; just
                # write the stamp (unless dry-running/enforcing setscene).
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                # Dispatch to the fakeroot worker when the task requires it
                # (spawning that worker on first use), otherwise to the
                # regular worker, via the pickled <runtask> pipe protocol.
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    if not mc in self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self, mc)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

                self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
                self.build_stamps2.append(self.build_stamps[task])
                self.runq_running.add(task)
                self.stats.taskActive()
                if self.stats.active < self.number_tasks:
                    return True

        # No new task dispatched (or worker slots full): wait on workers.
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
            if task not in self.runq_running:
                logger.error("Task %s never ran!", task)
            if task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True
2032
2033 def build_taskdepdata(self, task):
2034 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002035 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002036 next.add(task)
2037 while next:
2038 additional = []
2039 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002040 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2041 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2042 deps = self.rqdata.runtaskentries[revdep].depends
2043 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002044 taskhash = self.rqdata.runtaskentries[revdep].hash
2045 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002046 for revdep2 in deps:
2047 if revdep2 not in taskdepdata:
2048 additional.append(revdep2)
2049 next = additional
2050
2051 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2052 return taskdepdata
2053
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Executes the setscene ("_setscene") variants of tasks for a runqueue.

    On construction it collapses the full runqueue dependency graph into a
    smaller graph containing only the setscene tasks, resolves explicit
    setscene inter-task dependencies, and (if a hash validation function is
    configured) filters out tasks whose artefacts are unavailable.  execute()
    then feeds runnable setscene tasks to the worker processes.
    """
    def __init__(self, rq):
        # Sets up self.sq_revdeps / self.sq_deps (the squashed setscene
        # dependency graph), self.unskippable, self.sq_harddeps, stamp data
        # and the initial set of buildable tasks.
        RunQueueExecute.__init__(self, rq)

        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene_tids) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Working structures for the graph squash below:
        #   sq_revdeps      - mutable copy of each tid's reverse dependencies
        #   sq_revdeps_new  - accumulated setscene-only reverse dependencies
        #   sq_revdeps_squash - final squashed graph (setscene tids only)
        sq_revdeps = {}
        sq_revdeps_new = {}
        sq_revdeps_squash = {}
        self.sq_harddeps = {}
        self.stamps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        self.rqdata.init_progress_reporter.next_stage()

        # First process the chains up to the first setscene task.
        endpoints = {}
        for tid in self.rqdata.runtaskentries:
            sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new[tid] = set()
            if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                #bb.warn("Added endpoint %s" % (tid))
                endpoints[tid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Secondly process the chains between setscene tasks.
        for tid in self.rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint 2 %s" % (tid))
            for dep in self.rqdata.runtaskentries[tid].depends:
                    if tid in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(tid)
                    if dep not in endpoints:
                        endpoints[dep] = set()
                    #bb.warn("  Added endpoint 3 %s" % (dep))
                    endpoints[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        def process_endpoints(endpoints):
            # Recursively push accumulated setscene reverse dependencies
            # down the graph from each endpoint until a setscene task (or
            # the bottom of the graph) is reached.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                    continue
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        self.rqdata.init_progress_reporter.next_stage()

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = {}
        sq_revdeps_new2 = {}
        def process_endpoints2(endpoints):
            # Same propagation as process_endpoints, but each point adds
            # itself to the propagated set so direct endpoints can be
            # identified as unskippable below.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for tid in self.rqdata.runtaskentries:
            sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new2[tid] = set()
            if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                endpoints2[tid] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for tid in self.rqdata.runq_setscene_tids:
            if sq_revdeps_new2[tid]:
                self.unskippable.append(tid)

        self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))

        # Copy the accumulated setscene reverse dependencies into the final
        # squashed graph; anything left over for a non-setscene tid means
        # the squash above went wrong.
        for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
            if tid in self.rqdata.runq_setscene_tids:
                deps = set()
                for dep in sq_revdeps_new[tid]:
                    deps.add(dep)
                sq_revdeps_squash[tid] = deps
            elif len(sq_revdeps_new[tid]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
            self.rqdata.init_progress_reporter.update(taskcounter)

        self.rqdata.init_progress_reporter.next_stage()

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for tid in self.rqdata.runq_setscene_tids:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
            for (depname, idependtask) in idepends:

                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                if deptid not in self.rqdata.runtaskentries:
                    bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

                if not deptid in self.sq_harddeps:
                    self.sq_harddeps[deptid] = set()
                self.sq_harddeps[deptid].add(tid)

                sq_revdeps_squash[tid].add(deptid)
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[deptid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Re-add the hard dependencies as forward edges in the squashed graph.
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n   %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data

        # sq_revdeps is the final graph; sq_revdeps2 is a deep copy that
        # scenequeue_updatecounters() consumes destructively; sq_deps is the
        # forward (dependent) mapping derived from it.
        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Tasks with no remaining reverse dependencies can run immediately.
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        # If a hash validation function is configured, ask it which setscene
        # artefacts are actually available; anything unavailable (and not
        # noexec/already stamped) is failed outright up front.
        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_taskname.append(taskname)
                sq_task.append(tid)
            # Evaluate the configured validation function; it returns the
            # indices into sq_task whose artefacts are available.
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
        """
        Remove 'task' from the outstanding reverse dependencies of its
        dependents, marking any dependent with none left as buildable.
        A failed hard dependency propagates the failure recursively.
        """
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        """
        If a setscene-enforce whitelist is active and the failed task is not
        whitelisted, abort the run (switch to runQueueCleanUp).
        """
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        # Setscene task finished successfully: fire the event and mark it covered.
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        # Setscene task failed: record it as not covered so the real task runs.
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        # Fail a setscene task without ever running it (e.g. artefact known
        # to be unavailable); counted as completed+skipped for the stats.
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        # Skip a setscene task that doesn't need to run, treating it as covered.
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # Skip a skippable setscene whose covered reverse
                    # dependencies show it isn't needed for any target.
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            # Hand the task to a (fakeroot if required) worker process over
            # its stdin pipe, framed in <runtask> markers.
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # All setscene tasks processed: publish the covered/notcovered sets
        # and hand over to the real runqueue.
        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        # Delegate exit-status handling to the base class implementation.
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
        """
        Build the dependency data handed to the worker for a setscene task,
        using the explicit setscene inter-task dependencies (idepends)
        rather than the full runqueue graph.
        """
        def getsetscenedeps(tid):
            # Resolve the tid's _setscene idepends into concrete dep tids.
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
2489
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002490class TaskFailure(Exception):
2491 """
2492 Exception raised when a task in a runqueue fails
2493 """
2494 def __init__(self, x):
2495 self.args = x
2496
2497
class runQueueExitWait(bb.event.Event):
    """Fired while the runqueue is waiting for active task processes to exit."""

    def __init__(self, remain):
        # 'remain' is the number of still-active tasks being waited on.
        self.message = "Waiting for %s active tasks to finish" % remain
        self.remain = remain
        bb.event.Event.__init__(self)
2507
class runQueueEvent(bb.event.Event):
    """Base class for all runqueue task events."""
    def __init__(self, task, stats, rq):
        # The task id doubles as the human-readable task string here;
        # subclasses may override taskstring/taskname.
        self.taskid = task
        self.taskstring = task
        # Decompose the tid into its filename and task-name parts.
        self.taskfile = fn_from_tid(task)
        self.taskname = taskname_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the stats so later updates don't mutate this event.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2520
class sceneQueueEvent(runQueueEvent):
    """Base class for setscene (sceneQueue) task events."""
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Override the base fields so the event names the _setscene variant.
        suffix = "_setscene"
        self.taskstring = task + suffix
        self.taskname = taskname_from_tid(task) + suffix
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002531
class runQueueTaskStarted(runQueueEvent):
    """Fired when a runqueue task begins executing."""
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # noexec marks tasks that are recorded but have no code to run.
        self.noexec = noexec
2539
class sceneQueueTaskStarted(sceneQueueEvent):
    """Fired when a setscene task begins executing."""
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        # noexec marks tasks that are recorded but have no code to run.
        self.noexec = noexec
2547
class runQueueTaskFailed(runQueueEvent):
    """Fired when a runqueue task fails; carries the task's exit code."""
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        # Human-readable summary used in error output.
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
2558
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002559class sceneQueueTaskFailed(sceneQueueEvent):
2560 """
2561 Event notifying a setscene task failed
2562 """
2563 def __init__(self, task, stats, exitcode, rq):
2564 sceneQueueEvent.__init__(self, task, stats, rq)
2565 self.exitcode = exitcode
2566
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002567 def __str__(self):
2568 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2569
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002570class sceneQueueComplete(sceneQueueEvent):
2571 """
2572 Event when all the sceneQueue tasks are complete
2573 """
2574 def __init__(self, stats, rq):
2575 self.stats = stats.copy()
2576 bb.event.Event.__init__(self)
2577
class runQueueTaskCompleted(runQueueEvent):
    """Fired when a runqueue task completes successfully."""
2582
class sceneQueueTaskCompleted(sceneQueueEvent):
    """Fired when a setscene task completes successfully."""
2587
class runQueueTaskSkipped(runQueueEvent):
    """Fired when a runqueue task is skipped; carries the skip reason."""
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason
2595
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server

    Reads a byte stream of <event>...</event> and <exitcode>...</exitcode>
    pickled messages from the worker and dispatches them.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # We only read from this end; close the worker's write end in this
        # process and make our end non-blocking so read() can poll.
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Buffer of bytes received but not yet parsed into complete messages.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Allow the active executor to be swapped (e.g. scenequeue -> runqueue).
        self.rqexec = rqexec

    def read(self):
        """
        Pull any pending data from the pipe and dispatch complete messages.
        Returns True if any new data was received.
        """
        # First check none of the worker processes have died unexpectedly.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data available on the non-blocking fd.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep extracting complete messages until none remain. The slice
        # offsets 7/10 skip the opening tag; index+8/index+11 skip past the
        # closing </event>/</exitcode> tag.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain any remaining messages before closing the pipe.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002659
def get_setscene_enforce_whitelist(d):
    """
    Return the expanded BB_SETSCENE_ENFORCE_WHITELIST entries, or None when
    BB_SETSCENE_ENFORCE is not enabled.  '%:' prefixed entries are expanded
    against the command-line build targets.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    for item in whitelist:
        if not item.startswith('%:'):
            outlist.append(item)
            continue
        # '%' matches each non-option command-line target's recipe name.
        taskpart = item.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                outlist.append(target.split(':')[0] + ':' + taskpart)
    return outlist
2673
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname is allowed to run as a real task under
    setscene enforcement.  A None whitelist means enforcement is disabled,
    so everything is allowed; otherwise entries are fnmatch patterns.
    """
    import fnmatch
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)