blob: ae12c250499e287e693692a73c78e0b3d0d2850f [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001#!/usr/bin/env python
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4"""
5BitBake 'RunQueue' implementation
6
7Handles preparation and execution of a queue of tasks
8"""
9
10# Copyright (C) 2006-2007 Richard Purdie
11#
12# This program is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 2 as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License along
22# with this program; if not, write to the Free Software Foundation, Inc.,
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25import copy
26import os
27import sys
28import signal
29import stat
30import fcntl
31import errno
32import logging
33import re
34import bb
35from bb import msg, data, event
36from bb import monitordisk
37import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060038import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050039from multiprocessing import Process
Patrick Williamsc124f4f2015-09-15 14:41:29 -050040
# Module loggers: "BitBake" is the parent logger, "BitBake.RunQueue" is the
# child logger used for messages originating from this module.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a bare 32-character lowercase/uppercase hex string (an md5 sum)
# that is not embedded inside a longer alphanumeric run.
__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
45
def fn_from_tid(tid):
    """Return the filename part of a task id of the form "fn:taskname"."""
    parts = tid.rsplit(":", 1)
    return parts[0]
48
def taskname_from_tid(tid):
    """Return the task-name part of a task id of the form "fn:taskname"."""
    parts = tid.rsplit(":", 1)
    return parts[1]
51
def split_tid(tid):
    """Return (mc, fn, taskname) for *tid*, discarding the mcfn element."""
    return split_tid_mcfn(tid)[:3]
55
def split_tid_mcfn(tid):
    """
    Decompose a task id into its (mc, fn, taskname, mcfn) components.

    Multiconfig ids look like "multiconfig:<mc>:<fn>:<taskname>"; plain ids
    are "<fn>:<taskname>" with an empty mc and mcfn == fn.
    """
    if tid.startswith('multiconfig:'):
        pieces = tid.split(':')
        mc = pieces[1]
        # fn may itself contain colons, so rejoin everything between the
        # mc element and the trailing task name.
        fn = ":".join(pieces[2:-1])
        taskname = pieces[-1]
        mcfn = "multiconfig:" + mc + ":" + fn
    else:
        fn, taskname = tid.rsplit(":", 1)
        mc = ""
        mcfn = fn

    return (mc, fn, taskname, mcfn)
71
def build_tid(mc, fn, taskname):
    """Assemble a task id from its components, prefixing multiconfig when mc is set."""
    if not mc:
        return fn + ":" + taskname
    return "multiconfig:" + mc + ":" + fn + ":" + taskname
76
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for each outcome plus the number currently executing.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a shallow duplicate of this stats object."""
        duplicate = self.__class__(self.total)
        duplicate.__dict__.update(self.__dict__)
        return duplicate

    def taskFailed(self):
        """Record one active task as failed."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number = 1):
        """Record *number* active tasks as completed."""
        self.active -= number
        self.completed += number

    def taskSkipped(self, number = 1):
        """Record *number* tasks as skipped (they are counted as active too)."""
        self.active += number
        self.skipped += number

    def taskActive(self):
        """Record one task as having started executing."""
        self.active += 1
107
# These values indicate the next step due to be run in the
# runQueue state machine
# (Values are ordered: execution normally advances from lower to higher.)
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
118
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # BUGFIX: materialise the dict keys into a real list of task ids.
        # The previous code was "[self.rqdata.runtaskentries.keys()]", which
        # creates a one-element list containing the dict_keys view, breaking
        # prio_map.index(tid) and any per-task iteration/reordering.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Inverse of prio_map (tid -> priority index), built lazily on first use.
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop candidates that have started running since the last call.
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None

        # Fast path: with a single candidate there is nothing to compare.
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Choose the buildable task with the lowest priority index whose
        # stamp file is not already being produced by a running task.
        best = None
        bestprio = None
        for tid in self.buildable:
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        # Only hand out work while we are below the parallelism limit;
        # otherwise implicitly return None.
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuilable(self, task):
        # Mark *task* as buildable. NOTE(review): the misspelled name (sic)
        # is the established external interface, so it is kept unchanged.
        self.buildable.append(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of *taskid* for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Log the priority map at debug level 3, most important task first."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
198
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket task ids by their weight, preserving the original task
        # iteration order within each bucket.
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            buckets.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # Flatten the buckets in ascending weight order, then reverse so the
        # heaviest tasks end up at the front of the priority map.
        ordered = []
        for w in sorted(buckets):
            ordered.extend(buckets[w])
        ordered.reverse()
        self.prio_map = ordered
225
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the after.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        """
        Build on the speed scheduler's weight-sorted priority map, then
        regroup it so tasks of the same kind (same task name) are clustered
        while the per-recipe ordering is preserved.
        """
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceeding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more imporant (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500322
class RunTaskEntry(object):
    """
    Per-task record held in RunQueueData.runtaskentries.
    """
    def __init__(self):
        # Forward and reverse dependency edges (sets of task ids).
        self.depends = set()
        self.revdeps = set()
        # Signature hash and task reference; both start unset and are
        # filled in later by the runqueue machinery.
        self.hash = None
        self.task = None
        # Scheduling weight, defaults to 1 until computed.
        self.weight = 1
330
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500331class RunQueueData:
332 """
333 BitBake Run Queue implementation
334 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Hold the data needed to turn taskData into an executable runqueue.

        rq         -- owning RunQueue
        cooker     -- cooker instance
        cfgData    -- configuration datastore (read for whitelist/enforce vars)
        dataCaches -- per-multiconfig recipe data caches
        taskData   -- per-multiconfig TaskData
        targets    -- requested (mc, target, task, fn) tuples
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Policy values taken from the configuration datastore.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # No-op progress reporter by default (a "Dummy" implementation).
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        # Initialise the (empty) task table.
        self.reset()
351
352 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600353 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500354
355 def runq_depends_names(self, ids):
356 import re
357 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600358 for id in ids:
359 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500360 nam = re.sub("_[^,]*,", ",", nam)
361 ret.extend([nam])
362 return ret
363
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600364 def get_task_hash(self, tid):
365 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500366
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600367 def get_user_idstring(self, tid, task_name_suffix = ""):
368 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500369
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500370 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500371 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
372 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600373 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500374 return "%s:%s" % (pn, taskname)
375
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns the list of message strings describing the loops found
        (capped at 10 distinct loops).
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Recursive depth-first walk over reverse dependencies; a revdep
            # already present in prev_chain means we've closed a loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                # Decide whether revdep still needs exploring: either it has
                # never been explored, or a previously recorded exploration
                # intersects the current chain (potential new loop).
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            # Memoise the transitive reverse-dependency set for this task.
            explored_deps[tid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
462
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints -- the task ids to seed the propagation from (they get
        weight 10 and are marked done immediately). Returns the weight dict;
        aborts via bb.msg.fatal if unbuildable tasks are detected.
        """

        # NOTE(review): numTasks is computed but never used below.
        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        # Initialise: every task weighs 1 and owes one "visit" per revdep.
        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Propagate weights backwards through the dependency graph in waves:
        # a task becomes a new endpoint once all of its revdeps have
        # contributed their weight.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
522
523 def prepare(self):
524 """
525 Turn a set of taskData into a RunQueue and compute data needed
526 to optimise the execution order.
527 """
528
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600529 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500530 recursivetasks = {}
531 recursiveitasks = {}
532 recursivetasksselfref = set()
533
534 taskData = self.taskData
535
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600536 found = False
537 for mc in self.taskData:
538 if len(taskData[mc].taskentries) > 0:
539 found = True
540 break
541 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500542 # Nothing to do
543 return 0
544
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600545 self.init_progress_reporter.start()
546 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500547
548 # Step A - Work out a list of tasks to run
549 #
550 # Taskdata gives us a list of possible providers for every build and run
551 # target ordered by priority. It also gives information on each of those
552 # providers.
553 #
554 # To create the actual list of tasks to execute we fix the list of
555 # providers and then resolve the dependencies into task IDs. This
556 # process is repeated for each type of dependency (tdepends, deptask,
557 # rdeptast, recrdeptask, idepends).
558
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600559 def add_build_dependencies(depids, tasknames, depends, mc):
560 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500561 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600562 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500563 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600564 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500565 if depdata is None:
566 continue
567 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 t = depdata + ":" + taskname
569 if t in taskData[mc].taskentries:
570 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500571
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600572 def add_runtime_dependencies(depids, tasknames, depends, mc):
573 for depname in depids:
574 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500575 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600576 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500577 if depdata is None:
578 continue
579 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600580 t = depdata + ":" + taskname
581 if t in taskData[mc].taskentries:
582 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500583
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600584 def add_resolved_dependencies(mc, fn, tasknames, depends):
585 for taskname in tasknames:
586 tid = build_tid(mc, fn, taskname)
587 if tid in self.runtaskentries:
588 depends.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500589
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600590 for mc in taskData:
591 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500592
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600593 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
594 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500595
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
597
598 depends = set()
599 task_deps = self.dataCaches[mc].task_deps[taskfn]
600
601 self.runtaskentries[tid] = RunTaskEntry()
602
603 if fn in taskData[mc].failed_fns:
604 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500605
606 # Resolve task internal dependencies
607 #
608 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600609 for t in taskData[mc].taskentries[tid].tdepends:
610 (_, depfn, deptaskname, _) = split_tid_mcfn(t)
611 depends.add(build_tid(mc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500612
613 # Resolve 'deptask' dependencies
614 #
615 # e.g. do_sometask[deptask] = "do_someothertask"
616 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600617 if 'deptask' in task_deps and taskname in task_deps['deptask']:
618 tasknames = task_deps['deptask'][taskname].split()
619 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500620
621 # Resolve 'rdeptask' dependencies
622 #
623 # e.g. do_sometask[rdeptask] = "do_someothertask"
624 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600625 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
626 tasknames = task_deps['rdeptask'][taskname].split()
627 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500628
629 # Resolve inter-task dependencies
630 #
631 # e.g. do_sometask[depends] = "targetname:do_someothertask"
632 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600633 idepends = taskData[mc].taskentries[tid].idepends
634 for (depname, idependtask) in idepends:
635 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500636 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600637 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500638 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600639 t = depdata + ":" + idependtask
640 depends.add(t)
641 if t not in taskData[mc].taskentries:
642 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
643 irdepends = taskData[mc].taskentries[tid].irdepends
644 for (depname, idependtask) in irdepends:
645 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500646 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500647 if not taskData[mc].run_targets[depname]:
648 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600649 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500650 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600651 t = depdata + ":" + idependtask
652 depends.add(t)
653 if t not in taskData[mc].taskentries:
654 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500655
656 # Resolve recursive 'recrdeptask' dependencies (Part A)
657 #
658 # e.g. do_sometask[recrdeptask] = "do_someothertask"
659 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
660 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600661 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
662 tasknames = task_deps['recrdeptask'][taskname].split()
663 recursivetasks[tid] = tasknames
664 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
665 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
666 if taskname in tasknames:
667 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600669 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
670 recursiveitasks[tid] = []
671 for t in task_deps['recideptask'][taskname].split():
672 newdep = build_tid(mc, fn, t)
673 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500674
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600675 self.runtaskentries[tid].depends = depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500676
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600677 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500678
679 # Resolve recursive 'recrdeptask' dependencies (Part B)
680 #
681 # e.g. do_sometask[recrdeptask] = "do_someothertask"
682 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600683 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
684 self.init_progress_reporter.next_stage(len(recursivetasks))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500685 extradeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600686 for taskcounter, tid in enumerate(recursivetasks):
687 extradeps[tid] = set(self.runtaskentries[tid].depends)
688
689 tasknames = recursivetasks[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500690 seendeps = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500691
692 def generate_recdeps(t):
693 newdeps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600694 (mc, fn, taskname, _) = split_tid_mcfn(t)
695 add_resolved_dependencies(mc, fn, tasknames, newdeps)
696 extradeps[tid].update(newdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500697 seendeps.add(t)
698 newdeps.add(t)
699 for i in newdeps:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500700 if i not in self.runtaskentries:
701 # Not all recipes might have the recrdeptask task as a task
702 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600703 task = self.runtaskentries[i].task
704 for n in self.runtaskentries[i].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500705 if n not in seendeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600706 generate_recdeps(n)
707 generate_recdeps(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500708
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600709 if tid in recursiveitasks:
710 for dep in recursiveitasks[tid]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500711 generate_recdeps(dep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600712 self.init_progress_reporter.update(taskcounter)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500713
714 # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600715 for tid in recursivetasks:
716 extradeps[tid].difference_update(recursivetasksselfref)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500717
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600718 for tid in self.runtaskentries:
719 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500720 # Add in extra dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600721 if tid in extradeps:
722 self.runtaskentries[tid].depends = extradeps[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500723 # Remove all self references
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600724 if tid in self.runtaskentries[tid].depends:
725 logger.debug(2, "Task %s contains self reference!", tid)
726 self.runtaskentries[tid].depends.remove(tid)
727
728 self.init_progress_reporter.next_stage()
729
730 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500731
732 # Step B - Mark all active tasks
733 #
734 # Start with the tasks we were asked to run and mark all dependencies
735 # as active too. If the task is to be 'forced', clear its stamp. Once
736 # all active tasks are marked, prune the ones we don't need.
737
738 logger.verbose("Marking Active Tasks")
739
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600740 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500741 """
742 Mark an item as active along with its depends
743 (calls itself recursively)
744 """
745
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600746 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500747 return
748
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600749 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500750
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600751 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500752 for depend in depends:
753 mark_active(depend, depth+1)
754
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600755 self.target_tids = []
756 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500757
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600758 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500759 continue
760
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600761 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500762 continue
763
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500764 parents = False
765 if task.endswith('-'):
766 parents = True
767 task = task[:-1]
768
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600769 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500770 continue
771
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600772 # fn already has mc prefix
773 tid = fn + ":" + task
774 self.target_tids.append(tid)
775 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500776 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600777 tasks = []
778 for x in taskData[mc].taskentries:
779 if x.startswith(fn + ":"):
780 tasks.append(taskname_from_tid(x))
781 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500782 if close_matches:
783 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
784 else:
785 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600786 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
787
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500788 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500789 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600790 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500791 mark_active(i, 1)
792 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600793 mark_active(tid, 1)
794
795 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500796
797 # Step C - Prune all inactive tasks
798 #
799 # Once all active tasks are marked, prune the ones we don't need.
800
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500801 delcount = 0
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600802 for tid in list(self.runtaskentries.keys()):
803 if tid not in runq_build:
804 del self.runtaskentries[tid]
805 delcount += 1
806
807 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500808
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500809 if self.cooker.configuration.runall is not None:
810 runall = "do_%s" % self.cooker.configuration.runall
811 runall_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == runall }
812
813 # re-run the mark_active and then drop unused tasks from new list
814 runq_build = {}
815 for tid in list(runall_tids):
816 mark_active(tid,1)
817
818 for tid in list(self.runtaskentries.keys()):
819 if tid not in runq_build:
820 del self.runtaskentries[tid]
821 delcount += 1
822
823 if len(self.runtaskentries) == 0:
824 bb.msg.fatal("RunQueue", "No remaining tasks to run for build target %s with runall %s" % (target, runall))
825
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500826 #
827 # Step D - Sanity checks and computation
828 #
829
830 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600831 if len(self.runtaskentries) == 0:
832 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500833 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
834 else:
835 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
836
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600837 logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500838
839 logger.verbose("Assign Weightings")
840
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600841 self.init_progress_reporter.next_stage()
842
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500843 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600844 for tid in self.runtaskentries:
845 for dep in self.runtaskentries[tid].depends:
846 self.runtaskentries[dep].revdeps.add(tid)
847
848 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849
850 # Identify tasks at the end of dependency chains
851 # Error on circular dependency loops (length two)
852 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600853 for tid in self.runtaskentries:
854 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500855 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600856 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500857 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600858 if dep in self.runtaskentries[tid].depends:
859 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
860
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500861
862 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
863
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600864 self.init_progress_reporter.next_stage()
865
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500866 # Calculate task weights
867 # Check of higher length circular dependencies
868 self.runq_weight = self.calculate_task_weights(endpoints)
869
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600870 self.init_progress_reporter.next_stage()
871
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500872 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600873 for mc in self.dataCaches:
874 prov_list = {}
875 seen_fn = []
876 for tid in self.runtaskentries:
877 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
878 if taskfn in seen_fn:
879 continue
880 if mc != tidmc:
881 continue
882 seen_fn.append(taskfn)
883 for prov in self.dataCaches[mc].fn_provides[taskfn]:
884 if prov not in prov_list:
885 prov_list[prov] = [taskfn]
886 elif taskfn not in prov_list[prov]:
887 prov_list[prov].append(taskfn)
888 for prov in prov_list:
889 if len(prov_list[prov]) < 2:
890 continue
891 if prov in self.multi_provider_whitelist:
892 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500893 seen_pn = []
894 # If two versions of the same PN are being built its fatal, we don't support it.
895 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600896 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500897 if pn not in seen_pn:
898 seen_pn.append(pn)
899 else:
900 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500901 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
902 #
903 # Construct a list of things which uniquely depend on each provider
904 # since this may help the user figure out which dependency is triggering this warning
905 #
906 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
907 deplist = {}
908 commondeps = None
909 for provfn in prov_list[prov]:
910 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600911 for tid in self.runtaskentries:
912 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500913 if fn != provfn:
914 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600915 for dep in self.runtaskentries[tid].revdeps:
916 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500917 if fn == provfn:
918 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600919 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500920 if not commondeps:
921 commondeps = set(deps)
922 else:
923 commondeps &= deps
924 deplist[provfn] = deps
925 for provfn in deplist:
926 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
927 #
928 # Construct a list of provides and runtime providers for each recipe
929 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
930 #
931 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
932 provide_results = {}
933 rprovide_results = {}
934 commonprovs = None
935 commonrprovs = None
936 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600937 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500938 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600939 for rprovide in self.dataCaches[mc].rproviders:
940 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500941 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600942 for package in self.dataCaches[mc].packages:
943 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500944 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600945 for package in self.dataCaches[mc].packages_dynamic:
946 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500947 rprovides.add(package)
948 if not commonprovs:
949 commonprovs = set(provides)
950 else:
951 commonprovs &= provides
952 provide_results[provfn] = provides
953 if not commonrprovs:
954 commonrprovs = set(rprovides)
955 else:
956 commonrprovs &= rprovides
957 rprovide_results[provfn] = rprovides
958 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
959 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
960 for provfn in prov_list[prov]:
961 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
962 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
963
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500964 if self.warn_multi_bb:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600965 logger.warning(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500966 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500967 logger.error(msg)
968
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600969 self.init_progress_reporter.next_stage()
970
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500971 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600972 self.stampfnwhitelist = {}
973 for mc in self.taskData:
974 self.stampfnwhitelist[mc] = []
975 for entry in self.stampwhitelist.split():
976 if entry not in self.taskData[mc].build_targets:
977 continue
978 fn = self.taskData.build_targets[entry][0]
979 self.stampfnwhitelist[mc].append(fn)
980
981 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500982
983 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600984 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600986 for tid in self.runtaskentries:
987 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500988 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600989 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500990 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600991 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500992
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600993 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500994 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
995 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600996 if fn + ":" + taskname not in taskData[mc].taskentries:
997 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500998 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
999 if error_nostamp:
1000 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1001 else:
1002 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1003 else:
1004 logger.verbose("Invalidate task %s, %s", taskname, fn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001005 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
1006
1007 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001008
1009 # Invalidate task if force mode active
1010 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001011 for tid in self.target_tids:
1012 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001013
1014 # Invalidate task if invalidate mode active
1015 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001016 for tid in self.target_tids:
1017 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001018 for st in self.cooker.configuration.invalidate_stamp.split(','):
1019 if not st.startswith("do_"):
1020 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001021 invalidate_task(fn + ":" + st, True)
1022
1023 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001024
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001025 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001026 for mc in taskData:
1027 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1028 virtpnmap = {}
1029 for v in virtmap:
1030 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1031 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1032 if hasattr(bb.parse.siggen, "tasks_resolved"):
1033 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1034
1035 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001036
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001037 # Iterate over the task list and call into the siggen code
1038 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001039 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001040 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001041 for tid in todeal.copy():
1042 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1043 dealtwith.add(tid)
1044 todeal.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001045 procdep = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001046 for dep in self.runtaskentries[tid].depends:
1047 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
1048 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1049 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
1050 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001051
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001052 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001053
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001054 #self.dump_data()
1055 return len(self.runtaskentries)
1056
1057 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001058 """
1059 Dump some debug information on the internal data structures
1060 """
1061 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001062 for tid in self.runtaskentries:
1063 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1064 self.runtaskentries[tid].weight,
1065 self.runtaskentries[tid].depends,
1066 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001067
class RunQueueWorker():
    """Pairs a bitbake-worker subprocess with the pipe used to read from it."""
    def __init__(self, process, pipe):
        # process: the subprocess.Popen handle; pipe: its runQueuePipe reader.
        self.pipe = pipe
        self.process = process
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001072
1073class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
        """
        Set up a RunQueue which prepares (via RunQueueData) and later executes
        the task queue for the given build targets.

        cooker: the BBCooker driving this build
        cfgData: configuration datastore, queried via getVar below
        dataCaches/taskData: per-multiconfig caches and task data
        targets: list of (mc, target, task, fn) tuples to build
        """

        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        # Policy/hook settings from the metadata; unset hooks stay None so
        # callers can test for their presence.
        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        # State machine starts at the preparation phase.
        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneRun, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False
        # Executor and worker maps are populated later (start_worker /
        # start_fakeworker), keyed by multiconfig name.
        self.rqexe = None
        self.worker = {}
        self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001099
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001100 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001101 logger.debug(1, "Starting bitbake-worker")
1102 magic = "decafbad"
1103 if self.cooker.configuration.profile:
1104 magic = "decafbadbad"
1105 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001106 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001107 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001108 fakerootcmd = mcdata.getVar("FAKEROOTCMD")
1109 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001110 env = os.environ.copy()
1111 for key, value in (var.split('=') for var in fakerootenv):
1112 env[key] = value
1113 worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
1114 else:
1115 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1116 bb.utils.nonblockingfd(worker.stdout)
1117 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1118
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001119 runqhash = {}
1120 for tid in self.rqdata.runtaskentries:
1121 runqhash[tid] = self.rqdata.runtaskentries[tid].hash
1122
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001123 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001124 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1125 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1126 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1127 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001128 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001129 "runq_hash" : runqhash,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001130 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1131 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1132 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1133 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1134 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001135 "buildname" : self.cfgData.getVar("BUILDNAME"),
1136 "date" : self.cfgData.getVar("DATE"),
1137 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001138 }
1139
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001140 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001141 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001142 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001143 worker.stdin.flush()
1144
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001146
    def _teardown_worker(self, worker):
        """
        Shut down one bitbake-worker: ask it to quit, then drain its output
        pipe until the process has exited.

        worker: a RunQueueWorker as returned by _start_worker; falsy values
        make this a no-op.
        """
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        try:
            worker.process.stdin.write(b"<quit></quit>")
            worker.process.stdin.flush()
            worker.process.stdin.close()
        except IOError:
            # The worker may already have exited and closed its end of the
            # pipe; nothing more to do in that case.
            pass
        # Keep servicing the pipe until the process terminates so pending
        # events are not lost, then drain anything still buffered.
        while worker.process.returncode is None:
            worker.pipe.read()
            worker.process.poll()
        while worker.pipe.read():
            continue
        worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001163
    def start_worker(self):
        """
        Start one bitbake-worker per multiconfig, first tearing down any
        workers left over from a previous run.
        """
        if self.worker:
            self.teardown_workers()
        self.teardown = False
        # Populate self.worker in place so that, should a start fail midway,
        # the already-started workers are still reachable for teardown.
        for mc in self.rqdata.dataCaches:
            self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001170
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001171 def start_fakeworker(self, rqexec, mc):
1172 if not mc in self.fakeworker:
1173 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001174
1175 def teardown_workers(self):
1176 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001177 for mc in self.worker:
1178 self._teardown_worker(self.worker[mc])
1179 self.worker = {}
1180 for mc in self.fakeworker:
1181 self._teardown_worker(self.fakeworker[mc])
1182 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001183
1184 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001185 for mc in self.worker:
1186 self.worker[mc].pipe.read()
1187 for mc in self.fakeworker:
1188 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001189
1190 def active_fds(self):
1191 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001192 for mc in self.worker:
1193 fds.append(self.worker[mc].pipe.input)
1194 for mc in self.fakeworker:
1195 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001196 return fds
1197
    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """
        Return True if the stamp for 'tid' is current, i.e. the task does not
        need to rerun based on stamp file timestamps.

        tid: task identifier; taskname defaults to the task encoded in tid.
        recurse: also validate the stamps of all dependencies, memoising
        results in 'cache' (dict of tid -> bool, created here if None).
        """
        def get_timestamp(f):
            # Return the file's mtime, or None if it is missing/unreadable.
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                # NOTE(review): bare except also swallows non-OS errors;
                # presumably only OSError is expected here — worth confirming.
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        # "perfile" policy only compares stamps within the same recipe file;
        # other policies compare against the full dependency tree, optionally
        # exempting whitelisted files. stampwhitelist is only defined (and
        # only used) on the fulldeptree path.
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene variants (other than do_setscene itself) are considered
        # current once their stamp exists.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A setscene stamp that is missing a real stamp, or is newer
                # than it, means the dependency was provided from sstate and
                # need not invalidate us.
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        # Dependency stamp is newer than ours -> we are stale.
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                if recurse and iscurrent:
                    if dep in cache:
                        iscurrent = cache[dep]
                        if not iscurrent:
                            logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                    else:
                        iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                        cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent
1270
1271 def _execute_runqueue(self):
1272 """
1273 Run the tasks in a queue prepared by rqdata.prepare()
1274 Upon failure, optionally try to recover the build using any alternate providers
1275 (if the abort on failure configuration option isn't set)
1276 """
1277
1278 retval = True
1279
1280 if self.state is runQueuePrepare:
1281 self.rqexe = RunQueueExecuteDummy(self)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001282 # NOTE: if you add, remove or significantly refactor the stages of this
1283 # process then you should recalculate the weightings here. This is quite
1284 # easy to do - just change the next line temporarily to pass debug=True as
1285 # the last parameter and you'll get a printout of the weightings as well
1286 # as a map to the lines where next_stage() was called. Of course this isn't
1287 # critical, but it helps to keep the progress reporting accurate.
1288 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1289 "Initialising tasks",
1290 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001291 if self.rqdata.prepare() == 0:
1292 self.state = runQueueComplete
1293 else:
1294 self.state = runQueueSceneInit
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001295 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001296
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001297 # we are ready to run, emit dependency info to any UI or class which
1298 # needs it
1299 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1300 self.rqdata.init_progress_reporter.next_stage()
1301 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001302
1303 if self.state is runQueueSceneInit:
1304 dump = self.cooker.configuration.dump_signatures
1305 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001306 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001307 if 'printdiff' in dump:
1308 invalidtasks = self.print_diffscenetasks()
1309 self.dump_signatures(dump)
1310 if 'printdiff' in dump:
1311 self.write_diffscenetasks(invalidtasks)
1312 self.state = runQueueComplete
1313 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001314 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001315 self.start_worker()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001316 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001317 self.rqexe = RunQueueExecuteScenequeue(self)
1318
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001319 if self.state is runQueueSceneRun:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001320 if not self.dm_event_handler_registered:
1321 res = bb.event.register(self.dm_event_handler_name,
1322 lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
1323 ('bb.event.HeartbeatEvent',))
1324 self.dm_event_handler_registered = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001325 retval = self.rqexe.execute()
1326
1327 if self.state is runQueueRunInit:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001328 if self.cooker.configuration.setsceneonly:
1329 self.state = runQueueComplete
1330 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001331 # Just in case we didn't setscene
1332 self.rqdata.init_progress_reporter.finish()
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001333 logger.info("Executing RunQueue Tasks")
1334 self.rqexe = RunQueueExecuteTasks(self)
1335 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001336
1337 if self.state is runQueueRunning:
1338 retval = self.rqexe.execute()
1339
1340 if self.state is runQueueCleanUp:
1341 retval = self.rqexe.finish()
1342
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001343 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1344
1345 if build_done and self.dm_event_handler_registered:
1346 bb.event.remove(self.dm_event_handler_name, None)
1347 self.dm_event_handler_registered = False
1348
1349 if build_done and self.rqexe:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001350 self.teardown_workers()
1351 if self.rqexe.stats.failed:
1352 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1353 else:
1354 # Let's avoid the word "failed" if nothing actually did
1355 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
1356
1357 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001358 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001359
1360 if self.state is runQueueComplete:
1361 # All done
1362 return False
1363
1364 # Loop
1365 return retval
1366
1367 def execute_runqueue(self):
1368 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1369 try:
1370 return self._execute_runqueue()
1371 except bb.runqueue.TaskFailure:
1372 raise
1373 except SystemExit:
1374 raise
1375 except bb.BBHandledException:
1376 try:
1377 self.teardown_workers()
1378 except:
1379 pass
1380 self.state = runQueueComplete
1381 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001382 except Exception as err:
1383 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001384 try:
1385 self.teardown_workers()
1386 except:
1387 pass
1388 self.state = runQueueComplete
1389 raise
1390
1391 def finish_runqueue(self, now = False):
1392 if not self.rqexe:
1393 self.state = runQueueComplete
1394 return
1395
1396 if now:
1397 self.rqexe.finish_now()
1398 else:
1399 self.rqexe.finish()
1400
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001401 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001402 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001403 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1404 siggen = bb.parse.siggen
1405 dataCaches = self.rqdata.dataCaches
1406 siggen.dump_sigfn(fn, dataCaches, options)
1407
1408 def dump_signatures(self, options):
1409 fns = set()
1410 bb.note("Reparsing files to collect dependency data")
1411
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001412 for tid in self.rqdata.runtaskentries:
1413 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001414 fns.add(fn)
1415
1416 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1417 # We cannot use the real multiprocessing.Pool easily due to some local data
1418 # that can't be pickled. This is a cheap multi-process solution.
1419 launched = []
1420 while fns:
1421 if len(launched) < max_process:
1422 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1423 p.start()
1424 launched.append(p)
1425 for q in launched:
1426 # The finished processes are joined when calling is_alive()
1427 if not q.is_alive():
1428 launched.remove(q)
1429 for p in launched:
1430 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001431
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001432 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001433
1434 return
1435
    def print_diffscenetasks(self):
        """Report which tasks' cached (setscene) data cannot be reused.

        Runs the metadata hash validation function over every task in the
        runqueue, then prints the tasks where the current build first diverges
        from the cache.  Returns the set of invalid tasks that have no invalid
        task anywhere in their dependency tree (the divergence points).
        """

        valid = []
        # Parallel lists fed to the hash validation function; indices returned
        # by it map back into these lists.
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []  # NOTE(review): never populated or read - appears unused
        valid_new = set()

        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            # noexec tasks have nothing cached; they are handled separately below
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
            sq_hash.append(self.rqdata.runtaskentries[tid].hash)
            sq_taskname.append(taskname)
            sq_task.append(tid)
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
        try:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
            valid = bb.utils.better_eval(call, locs)
        # Handle version with no siginfo parameter
        except TypeError:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            valid = bb.utils.better_eval(call, locs)
        # valid holds indices into the parallel lists; translate to task ids
        for v in valid:
            valid_new.add(sq_task[v])

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                # dep is only marked valid if every setscene reverse dependency
                # of it is also a noexec task
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Anything neither validated nor noexec is invalid
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # For each invalid task, walk its dependency tree breadth-first; if any
        # dependency is itself invalid, tid is not a first divergence point.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Stop the walk early once tid is known to be "found"
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1518
1519 def write_diffscenetasks(self, invalidtasks):
1520
1521 # Define recursion callback
1522 def recursecb(key, hash1, hash2):
1523 hashes = [hash1, hash2]
1524 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1525
1526 recout = []
1527 if len(hashfiles) == 2:
1528 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
1529 recout.extend(list(' ' + l for l in out2))
1530 else:
1531 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1532
1533 return recout
1534
1535
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001536 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001537 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1538 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001539 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001540 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1541 match = None
1542 for m in matches:
1543 if h in m:
1544 match = m
1545 if match is None:
1546 bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001547 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001548 if matches:
1549 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
1550 prevh = __find_md5__.search(latestmatch).group(0)
1551 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1552 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1553
class RunQueueExecute:
    """Base class for runqueue executors.

    Holds the shared execution state (buildable/running/complete task sets,
    stamp bookkeeping, failed task ids) and worker-pipe helpers used by the
    concrete executors.  Subclasses provide self.stats and the
    task_fail()/task_complete() callbacks used by runqueue_process_waitpid().
    """

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Maximum number of tasks allowed to execute concurrently
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Task ids (tids) partitioned by execution state
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamps of currently executing tasks: tid -> stampfile, plus a flat
        # list of the same stamp paths
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        self.stampcache = {}

        # Point every worker pipe (one per multiconfig "mc") at this executor
        # so incoming worker events are dispatched here
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        """Handle a worker's report that "task" exited with "status"."""

        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        """Tell all workers to stop immediately and set the final queue state."""
        for mc in self.rq.worker:
            try:
                self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.worker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        for mc in self.rq.fakeworker:
            try:
                self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.fakeworker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """Begin a graceful shutdown, waiting for active tasks to complete."""
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            # Tasks still running: keep draining worker output and report the
            # file descriptors still being waited on
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        """Ask the metadata's depvalidate function whether "task" is needed.

        Returns False when no depvalidate function is configured, otherwise
        whatever the evaluated function returns.
        NOTE(review): mutates the caller's taskdeps set by adding task to it.
        """
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid
1648
class RunQueueExecuteDummy(RunQueueExecute):
    """Placeholder executor used when there is nothing to execute."""

    def __init__(self, rq):
        # Deliberately skip RunQueueExecute.__init__ - no workers are needed.
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        """Immediately mark the runqueue as complete."""
        self.rq.state = runQueueComplete
1657
class RunQueueExecuteTasks(RunQueueExecute):
    """Executor for the main (non-setscene) phase of the runqueue.

    Works out which tasks are covered by setscene results, selects a
    scheduler, and then drives task execution via execute(), which the
    RunQueue state machine calls repeatedly.
    """

    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        # Remember what setscene itself covered before we extend the set below
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            # A task whose reverse dependencies are all covered is covered too
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Iterate to a fixed point, propagating "covered" to any task whose
        # reverse dependencies are all covered (unless explicitly notcovered)
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    if tid in self.rq.scenequeue_notcovered:
                        continue
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            # Find tasks that have neither a current setscene stamp nor a
            # current normal stamp
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            # Evaluate the metadata function to decide which covered tasks to
            # un-cover
            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        def removecoveredtask(tid):
            # Drop tid from the covered set and delete its setscene stamp so
            # the real task will execute
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskname = taskname + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        # Un-cover the elected tasks and, transitively, any covered dependency
        # of theirs that was not covered before this executor started
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)


        # Notify listeners (per multiconfig) which target stamps are current
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))

            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Instantiate the scheduler named by BB_SCHEDULER
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

    def get_schedulers(self):
        """Return the set of available scheduler classes: every
        RunQueueScheduler subclass in this module, plus any extra classes
        named as "module.ClassName" in BB_SCHEDULERS."""
        schedulers = set(obj for obj in globals().values()
                             if type(obj) is type and
                                issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))
        return schedulers

    def setbuildable(self, task):
        """Mark task as buildable and notify the scheduler."""
        self.runq_buildable.add(task)
        self.sched.newbuilable(task)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete.add(task)
        for revdep in self.rqdata.runtaskentries[task].revdeps:
            if revdep in self.runq_running:
                continue
            if revdep in self.runq_buildable:
                continue
            alldeps = 1
            for dep in self.rqdata.runtaskentries[revdep].depends:
                if dep not in self.runq_complete:
                    alldeps = 0
            if alldeps == 1:
                self.setbuildable(revdep)
                # NOTE(review): fn and taskname are computed but never used
                fn = fn_from_tid(revdep)
                taskname = taskname_from_tid(revdep)
                logger.debug(1, "Marking task %s as buildable", revdep)

    def task_complete(self, task):
        """Record a successful task and fire the completion event."""
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # When abort is set, move straight to cleanup instead of continuing
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task, reason):
        """Skip task (e.g. covered by setscene or already stamped)."""
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()

        Returns True while there may be more work to do, or the result of
        active_fds() while waiting on running workers.
        """

        # One-time setscene whitelist enforcement check
        if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
            self.rqdata.setscenewhitelist_checked = True

            # Check tasks that are going to run against the whitelist
            def check_norun_task(tid, showerror=False):
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                # Ignore covered tasks
                if tid in self.rq.scenequeue_covered:
                    return False
                # Ignore stamped tasks
                if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                    return False
                # Ignore noexec tasks
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    return False

                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                    if showerror:
                        if tid in self.rqdata.runq_setscene_tids:
                            logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
                        else:
                            logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
                    return True
                return False
            # Look to see if any tasks that we think shouldn't run are going to
            unexpected = False
            for tid in self.rqdata.runtaskentries:
                if check_norun_task(tid):
                    unexpected = True
                    break
            if unexpected:
                # Run through the tasks in the rough order they'd have executed and print errors
                # (since the order can be useful - usually missing sstate for the last few tasks
                # is the cause of the problem)
                task = self.sched.next()
                while task is not None:
                    check_norun_task(task, showerror=True)
                    self.task_skip(task, 'Setscene enforcement check')
                    task = self.sched.next()

                self.rq.state = runQueueCleanUp
                return True

        # Drain any pending output from the workers
        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            # Skip without executing if setscene already covered this task
            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            # Skip if the task's stamp is already up to date
            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks are completed locally; just write the stamp
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                # Dispatch to the fakeroot worker when the task requires it,
                # spawning that worker on first use; otherwise use the normal
                # worker for this multiconfig
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    if not mc in self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self, mc)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

                self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
                self.build_stamps2.append(self.build_stamps[task])
                self.runq_running.add(task)
                self.stats.taskActive()
                if self.stats.active < self.number_tasks:
                    return True

        # At capacity (or nothing schedulable): wait on the running workers
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
            if task not in self.runq_running:
                logger.error("Task %s never ran!", task)
            if task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True

    def build_taskdepdata(self, task):
        """Build the taskdepdata structure sent to the worker for task:
        a dict of tid -> [pn, taskname, fn, deps, provides, taskhash]
        covering task and its transitive dependencies.

        NOTE(review): 'next' aliases the task's depends set, so the
        next.add(task) below mutates
        self.rqdata.runtaskentries[task].depends in place - confirm intended.
        """
        taskdepdata = {}
        next = self.rqdata.runtaskentries[task].depends
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = self.rqdata.runtaskentries[revdep].depends
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
1983
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Executes the "setscene" phase of the runqueue.

    The constructor collapses the full runqueue dependency graph into a
    smaller graph containing only the setscene tasks, resolves explicit
    inter-setscene dependencies, and (when a hash validation hook is
    configured) determines which setscene tasks are worth attempting.
    execute() then runs those tasks, recording which are "covered" so the
    real tasks can be skipped in the main run.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        # Setscene tasks that succeeded (real task can be skipped), that
        # failed/were unavailable (real task must run), and that were
        # judged unnecessary for the current targets.
        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene_tids) == 0:
            rq.scenequeue_covered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Working state for the graph collapse below:
        #   sq_revdeps      - mutable copy of each tid's reverse deps, edges
        #                     are removed as chains are processed
        #   sq_revdeps_new  - accumulated setscene-only reverse deps per tid
        #   sq_revdeps_squash - final collapsed graph (setscene tids only)
        sq_revdeps = {}
        sq_revdeps_new = {}
        sq_revdeps_squash = {}
        # deptid -> set of setscene tids that explicitly hard-depend on it
        self.sq_harddeps = {}
        # tid -> setscene stamp file path (used to avoid stamp clashes)
        self.stamps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        self.rqdata.init_progress_reporter.next_stage()

        # First process the chains up to the first setscene task.
        endpoints = {}
        for tid in self.rqdata.runtaskentries:
            sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new[tid] = set()
            if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                #bb.warn("Added endpoint %s" % (tid))
                endpoints[tid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Secondly process the chains between setscene tasks.
        for tid in self.rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint 2 %s" % (tid))
            for dep in self.rqdata.runtaskentries[tid].depends:
                    if tid in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(tid)
                    if dep not in endpoints:
                        endpoints[dep] = set()
                    #bb.warn("  Added endpoint 3 %s" % (dep))
                    endpoints[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Walk backwards from each endpoint, pushing the set of setscene
        # tasks that (transitively) depend on each node down through its
        # dependencies, removing processed reverse-dependency edges as we
        # go.  Recurses on the new endpoints this uncovers.
        def process_endpoints(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    # Chains terminate at setscene tasks; record and stop.
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                    continue
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        self.rqdata.init_progress_reporter.next_stage()

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = {}
        sq_revdeps_new2 = {}
        # Variant of process_endpoints() above which also seeds each node's
        # propagated task set with the node itself, so that direct
        # endpoints flow through to the setscene tasks they depend on.
        def process_endpoints2(endpoints):
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for tid in self.rqdata.runtaskentries:
            sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new2[tid] = set()
            if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                endpoints2[tid] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        # Any setscene task that ended up with propagated reverse deps is
        # directly referenced by the build and must not be skipped.
        for tid in self.rqdata.runq_setscene_tids:
            if sq_revdeps_new2[tid]:
                self.unskippable.append(tid)

        self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))

        # Transfer the collapsed reverse dependencies into the squash map;
        # non-setscene tasks must have no residual entries at this point.
        for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
            if tid in self.rqdata.runq_setscene_tids:
                deps = set()
                for dep in sq_revdeps_new[tid]:
                    deps.add(dep)
                sq_revdeps_squash[tid] = deps
            elif len(sq_revdeps_new[tid]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
            self.rqdata.init_progress_reporter.update(taskcounter)

        self.rqdata.init_progress_reporter.next_stage()

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for tid in self.rqdata.runq_setscene_tids:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                realtid = tid + "_setscene"
                idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
                self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
                for (depname, idependtask) in idepends:

                    if depname not in self.rqdata.taskData[mc].build_targets:
                        continue

                    depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                    if depfn is None:
                        continue
                    deptid = depfn + ":" + idependtask.replace("_setscene", "")
                    if deptid not in self.rqdata.runtaskentries:
                        bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

                    if not deptid in self.sq_harddeps:
                        self.sq_harddeps[deptid] = set()
                    self.sq_harddeps[deptid].add(tid)

                    sq_revdeps_squash[tid].add(deptid)
                    # Have to zero this to avoid circular dependencies
                    sq_revdeps_squash[deptid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Re-add the hard dependency edges as reverse dependencies so the
        # dependent setscene task waits for its hard dependencies.
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n   %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data))

        # sq_revdeps holds the collapsed graph; sq_revdeps2 is a deep copy
        # consumed (edges removed) by scenequeue_updatecounters().
        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        # Invert the reverse-dependency graph into forward dependencies.
        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Tasks with no outstanding reverse dependencies can run first.
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        self.outrightfail = []
        if self.rq.hashvalidate:
            # Ask the configured hash validation function which setscene
            # tasks have usable artefacts; tasks with current stamps or
            # noexec tasks are skipped up-front.
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_taskname.append(taskname)
                sq_task.append(tid)
            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            # NOTE: the 'sq_task' name passed to the validation function is
            # bound to the task *names* list; the tid list (sq_task) is only
            # used below to map the returned indices back to tids.
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            # 'valid' is a list of indices into the sq_* lists.
            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            # Anything not validated and not noexec will fail outright.
            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
        """
        Remove 'task' from the outstanding reverse dependencies of the
        setscene tasks that depend on it, marking any that become free as
        buildable.  When 'task' failed, tasks that hard-depend on it are
        recursively treated as failed too.
        """
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        """
        When setscene enforcement is active, abort the build (move to
        runQueueCleanUp) if the failed setscene task is not whitelisted.
        """
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        # Successful setscene task: fire the event and propagate coverage.
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        # Failed setscene task: the real task will need to run instead.
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        # Mark a setscene task as failed without ever running it.
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        # Skip a setscene task but count it as covered (e.g. stamp present).
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # Skippable tasks whose covering dependencies are all
                    # already covered only need running if they are an
                    # explicit build target.
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            # With --force, targets must run for real, not from setscene.
            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            # Hand the task to a (fakeroot if required) worker process as a
            # pickled <runtask> message on its stdin.
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # All setscene tasks processed: publish the coverage results and
        # move on to executing the real runqueue.
        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        # Delegate directly to the base class implementation.
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
        """
        Collect dependency metadata for a setscene task, using only the
        explicit setscene inter-task dependencies (idepends) rather than
        the full runqueue graph.  Returns a dict mapping each tid to
        [pn, taskname, fn, deps, provides, taskhash].
        """
        # Resolve the explicit setscene idepends of a tid to a fresh set
        # of tids (always returns a new set, so callers may mutate it).
        def getsetscenedeps(tid):
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        # Breadth-first walk over the setscene dependency graph.
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
2419
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002420class TaskFailure(Exception):
2421 """
2422 Exception raised when a task in a runqueue fails
2423 """
2424 def __init__(self, x):
2425 self.args = x
2426
2427
class runQueueExitWait(bb.event.Event):
    """Fired while the runqueue is waiting for task processes to exit."""

    def __init__(self, remain):
        # Count of tasks still active, plus a ready-made UI message.
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)
2437
class runQueueEvent(bb.event.Event):
    """Base class for runqueue events; captures per-task identity data."""

    def __init__(self, task, stats, rq):
        # For normal tasks the display string is the tid itself
        # (sceneQueueEvent overrides it with a _setscene suffix).
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the stats so the event is immune to later updates.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2450
class sceneQueueEvent(runQueueEvent):
    """Base class for scenequeue (setscene) events."""

    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        suffix = "_setscene"
        # Re-point the identity fields at the setscene variant of the task.
        self.taskstring = task + suffix
        self.taskname = taskname_from_tid(task) + suffix
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002461
class runQueueTaskStarted(runQueueEvent):
    """Fired when a runqueue task starts executing."""

    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Marks stamp-only (no-execute) runs of the task.
        self.noexec = noexec
2469
class sceneQueueTaskStarted(sceneQueueEvent):
    """Fired when a setscene task starts executing."""

    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        # Marks stamp-only (no-execute) runs of the task.
        self.noexec = noexec
2477
class runQueueTaskFailed(runQueueEvent):
    """Fired when a runqueue task fails; carries the exit code."""

    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        details = (self.taskstring, self.exitcode)
        return "Task (%s) failed with exit code '%s'" % details
2488
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002489class sceneQueueTaskFailed(sceneQueueEvent):
2490 """
2491 Event notifying a setscene task failed
2492 """
2493 def __init__(self, task, stats, exitcode, rq):
2494 sceneQueueEvent.__init__(self, task, stats, rq)
2495 self.exitcode = exitcode
2496
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002497 def __str__(self):
2498 return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)
2499
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002500class sceneQueueComplete(sceneQueueEvent):
2501 """
2502 Event when all the sceneQueue tasks are complete
2503 """
2504 def __init__(self, stats, rq):
2505 self.stats = stats.copy()
2506 bb.event.Event.__init__(self)
2507
class runQueueTaskCompleted(runQueueEvent):
    """Fired when a runqueue task completes successfully."""
2512
class sceneQueueTaskCompleted(sceneQueueEvent):
    """Fired when a setscene task completes successfully."""
2517
class runQueueTaskSkipped(runQueueEvent):
    """Fired when a runqueue task is skipped; carries the reason."""

    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason
2525
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # Keep the read end non-blocking and close the (unused) write end.
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Buffer of raw bytes read so far but not yet parsed into messages.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Swap in a new executor (used when the runqueue changes phase).
        self.rqexec = rqexec

    def read(self):
        """
        Drain pending bytes from the pipe and dispatch any complete
        <event>...</event> or <exitcode>...</exitcode> pickled messages.
        Returns True if new data arrived.
        """
        # First check none of the worker processes died on us.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data available on the non-blocking fd.
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep parsing until no complete framed message remains.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            # Offsets 7 and 8 are len(b"<event>") and len(b"</event>").
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            # Offsets 10 and 11 are len(b"<exitcode>") and len(b"</exitcode>").
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain any remaining messages before closing the fd.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002589
def get_setscene_enforce_whitelist(d):
    """
    Return the list of 'pn:taskname' entries permitted to run for real
    when BB_SETSCENE_ENFORCE is active, or None when enforcement is off.

    Entries of the form '%:taskname' expand against the non-option
    command-line arguments (sys.argv), producing one entry per target.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    # Iterate directly: the list is never mutated during the loop, so the
    # defensive whitelist[:] copy the original code made was unnecessary.
    for item in whitelist:
        if item.startswith('%:'):
            # Wildcard recipe: apply the task pattern to every target
            # named on the command line.
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist
2603
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname matches an fnmatch pattern in 'whitelist',
    or unconditionally when whitelist is None (enforcement disabled).
    """
    import fnmatch
    if whitelist is None:
        # No enforcement configured - everything is permitted.
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, whitelist_item) for whitelist_item in whitelist)