#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import re
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
def fn_from_tid(tid):
    return tid.rsplit(":", 1)[0]

def taskname_from_tid(tid):
    return tid.rsplit(":", 1)[1]

def split_tid(tid):
    (mc, fn, taskname, _) = split_tid_mcfn(tid)
    return (mc, fn, taskname)

def split_tid_mcfn(tid):
    if tid.startswith('multiconfig:'):
        elems = tid.split(':')
        mc = elems[1]
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        mcfn = "multiconfig:" + mc + ":" + fn
    else:
        tid = tid.rsplit(":", 1)
        mc = ""
        fn = tid[0]
        taskname = tid[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)

def build_tid(mc, fn, taskname):
    if mc:
        return "multiconfig:" + mc + ":" + fn + ":" + taskname
    return fn + ":" + taskname
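# Illustrative examples (using a hypothetical recipe path) of the task id ("tid")
# strings the helpers above operate on:
#   split_tid_mcfn("/path/foo.bb:do_compile")
#       -> ("", "/path/foo.bb", "do_compile", "/path/foo.bb")
#   split_tid_mcfn("multiconfig:mc1:/path/foo.bb:do_compile")
#       -> ("mc1", "/path/foo.bb", "do_compile", "multiconfig:mc1:/path/foo.bb")
# build_tid() performs the inverse mapping.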
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self, number = 1):
        self.active = self.active - number
        self.completed = self.completed + number

    def taskSkipped(self, number = 1):
        self.active = self.active + number
        self.skipped = self.skipped + number

    def taskActive(self):
        self.active = self.active + 1
# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)
        self.prio_map = list(self.rqdata.runtaskentries.keys())
        self.buildable = []
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
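        # Candidates that are already running are filtered out first; in addition,
        # a candidate whose stamp file matches one recorded in self.rq.build_stamps
        # (i.e. a task currently being executed) is skipped below, so two tasks
        # never write the same stamp file concurrently.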
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in self.buildable:
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best
    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuilable(self, task):
        self.buildable.append(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight;
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        weights = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            if not weight in weights:
                weights[weight] = []
            weights[weight].append(tid)

        self.prio_map = []
        for weight in sorted(weights):
            for w in weights[weight]:
                self.prio_map.append(w)

        self.prio_map.reverse()
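        # For example (hypothetical tids): with weights {1: ["a.bb:do_x"], 10: ["b.bb:do_y"]},
        # the loops above append in ascending weight order and the final reverse()
        # leaves prio_map as ["b.bb:do_y", "a.bb:do_x"], so the heaviest task is
        # offered to the executor first.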
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronize by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)
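        # Illustrative example (hypothetical recipes): if foo.bb contributes
        # ["do_fetch", "do_compile", "do_build"] and bar.bb contributes
        # ["do_fetch", "do_build"], the merge produces
        # ["do_fetch", "do_compile", "do_build"]; the reverse() below then makes
        # do_build the most important kind of task.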
        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (even if those other recipes are more important
        # than foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example of why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's tasks that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
class RunTaskEntry(object):
    def __init__(self):
        self.depends = set()
        self.revdeps = set()
        self.hash = None
        self.task = None
        self.weight = 1
class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
    def reset(self):
        self.runtaskentries = {}

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in ids:
            nam = os.path.basename(id)
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_hash(self, tid):
        return self.runtaskentries[tid].hash

    def get_user_idstring(self, tid, task_name_suffix = ""):
        return tid + task_name_suffix

    def get_short_user_idstring(self, task, task_name_suffix = ""):
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that are not
        possible to execute due to circular dependencies.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break
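        # Illustrative example (hypothetical chain): if c depends on b and b depends
        # on a, then c is an endpoint starting at weight 10; the sweep above gives b
        # a weight of 1 + 10 = 11 and a a weight of 1 + 11 = 12, so the tasks that
        # the most other tasks rely on end up heaviest and are scheduled earliest.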
        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = {}
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        found = False
        for mc in self.taskData:
            if len(taskData[mc].taskentries) > 0:
                found = True
                break
        if not found:
            # Nothing to do
            return 0

        self.init_progress_reporter.start()
        self.init_progress_reporter.next_stage()

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).
        def add_build_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
                    continue
                depdata = taskData[mc].build_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_runtime_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
                    continue
                depdata = taskData[mc].run_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_resolved_dependencies(mc, fn, tasknames, depends):
            for taskname in tasknames:
                tid = build_tid(mc, fn, taskname)
                if tid in self.runtaskentries:
                    depends.add(tid)
        for mc in taskData:
            for tid in taskData[mc].taskentries:

                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                #runtid = build_tid(mc, fn, taskname)

                #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)

                depends = set()
                task_deps = self.dataCaches[mc].task_deps[taskfn]

                self.runtaskentries[tid] = RunTaskEntry()

                if fn in taskData[mc].failed_fns:
                    continue

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                for t in taskData[mc].taskentries[tid].tdepends:
                    (_, depfn, deptaskname, _) = split_tid_mcfn(t)
                    depends.add(build_tid(mc, depfn, deptaskname))

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskname in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskname].split()
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskname].split()
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData[mc].taskentries[tid].idepends
                for (depname, idependtask) in idepends:
                    if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData[mc].build_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
                irdepends = taskData[mc].taskentries[tid].irdepends
                for (depname, idependtask) in irdepends:
                    if depname in taskData[mc].run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        if not taskData[mc].run_targets[depname]:
                            continue
                        depdata = taskData[mc].run_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskname].split()
                    recursivetasks[tid] = tasknames
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
                    if taskname in tasknames:
                        recursivetasksselfref.add(tid)

                if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
                    recursiveitasks[tid] = []
                    for t in task_deps['recideptask'][taskname].split():
                        newdep = build_tid(mc, fn, t)
                        recursiveitasks[tid].append(newdep)

                self.runtaskentries[tid].depends = depends

        #self.dump_data()
        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
        self.init_progress_reporter.next_stage(len(recursivetasks))
        extradeps = {}
        for taskcounter, tid in enumerate(recursivetasks):
            extradeps[tid] = set(self.runtaskentries[tid].depends)

            tasknames = recursivetasks[tid]
            seendeps = set()

            def generate_recdeps(t):
                newdeps = set()
                (mc, fn, taskname, _) = split_tid_mcfn(t)
                add_resolved_dependencies(mc, fn, tasknames, newdeps)
                extradeps[tid].update(newdeps)
                seendeps.add(t)
                newdeps.add(t)
                for i in newdeps:
                    if i not in self.runtaskentries:
                        # Not all recipes might have the recrdeptask task as a task
                        continue
                    task = self.runtaskentries[i].task
                    for n in self.runtaskentries[i].depends:
                        if n not in seendeps:
                            generate_recdeps(n)
            generate_recdeps(tid)

            if tid in recursiveitasks:
                for dep in recursiveitasks[tid]:
                    generate_recdeps(dep)
            self.init_progress_reporter.update(taskcounter)

        # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for tid in recursivetasks:
            extradeps[tid].difference_update(recursivetasksselfref)

        for tid in self.runtaskentries:
            task = self.runtaskentries[tid].task
            # Add in extra dependencies
            if tid in extradeps:
                self.runtaskentries[tid].depends = extradeps[tid]
            # Remove all self references
            if tid in self.runtaskentries[tid].depends:
                logger.debug(2, "Task %s contains self reference!", tid)
                self.runtaskentries[tid].depends.remove(tid)

        self.init_progress_reporter.next_stage()

        #self.dump_data()
        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_tids = []
        for (mc, target, task, fn) in self.targets:

            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
                continue

            if target in taskData[mc].failed_deps:
                continue

            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            if fn in taskData[mc].failed_fns:
                continue

            # fn already has mc prefix
            tid = fn + ":" + task
            self.target_tids.append(tid)
            if tid not in taskData[mc].taskentries:
                import difflib
                tasks = []
                for x in taskData[mc].taskentries:
                    if x.startswith(fn + ":"):
                        tasks.append(taskname_from_tid(x))
                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n  %s" % "\n  ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))

            # For tasks called "XXXX-", only run their dependencies
            if parents:
                for i in self.runtaskentries[tid].depends:
                    mark_active(i, 1)
            else:
                mark_active(tid, 1)

        self.init_progress_reporter.next_stage()
        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        delcount = 0
        for tid in list(self.runtaskentries.keys()):
            if tid not in runq_build:
                del self.runtaskentries[tid]
                delcount += 1

        self.init_progress_reporter.next_stage()
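        # If --runall was requested (for example a hypothetical
        # "bitbake --runall=fetch <target>" invocation), restrict the queue to the
        # matching do_<task> entries plus everything they depend on.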
        if self.cooker.configuration.runall is not None:
            runall = "do_%s" % self.cooker.configuration.runall
            runall_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == runall }

            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}
            for tid in list(runall_tids):
                mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    del self.runtaskentries[tid]
                    delcount += 1

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "No remaining tasks to run for build target %s with runall %s" % (target, runall))
        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runtaskentries) == 0:
            if not taskData[''].abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runtaskentries))

        logger.verbose("Assign Weightings")
        self.init_progress_reporter.next_stage()

        # Generate a list of reverse dependencies to ease future calculations
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                self.runtaskentries[dep].revdeps.add(tid)

        self.init_progress_reporter.next_stage()

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for tid in self.runtaskentries:
            revdeps = self.runtaskentries[tid].revdeps
            if len(revdeps) == 0:
                endpoints.append(tid)
            for dep in revdeps:
                if dep in self.runtaskentries[tid].depends:
                    bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))

        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        self.init_progress_reporter.next_stage()

        # Calculate task weights
        # Check for longer circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        self.init_progress_reporter.next_stage()
        # Sanity Check - Check for multiple tasks building the same provider
        for mc in self.dataCaches:
            prov_list = {}
            seen_fn = []
            for tid in self.runtaskentries:
                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                if taskfn in seen_fn:
                    continue
                if mc != tidmc:
                    continue
                seen_fn.append(taskfn)
                for prov in self.dataCaches[mc].fn_provides[taskfn]:
                    if prov not in prov_list:
                        prov_list[prov] = [taskfn]
                    elif taskfn not in prov_list[prov]:
                        prov_list[prov].append(taskfn)
            for prov in prov_list:
                if len(prov_list[prov]) < 2:
                    continue
                if prov in self.multi_provider_whitelist:
                    continue
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCaches[mc].pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n  %s" % (prov, "\n  ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for tid in self.runtaskentries:
                        fn = fn_from_tid(tid)
                        if fn != provfn:
                            continue
                        for dep in self.runtaskentries[tid].revdeps:
                            fn = fn_from_tid(dep)
                            if fn == provfn:
                                continue
                            deps.add(dep)
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n  %s" % (provfn, "\n  ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCaches[mc].fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCaches[mc].rproviders:
                        if provfn in self.dataCaches[mc].rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCaches[mc].packages:
                        if provfn in self.dataCaches[mc].packages[package]:
                            rprovides.add(package)
                    for package in self.dataCaches[mc].packages_dynamic:
                        if provfn in self.dataCaches[mc].packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n  %s" % ("\n  ".join(commonprovs))
                #msg += "\nCommon rprovides:\n  %s" % ("\n  ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n  %s" % (provfn, "\n  ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n  %s" % (provfn, "\n  ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.warning(msg)
                else:
                    logger.error(msg)

        self.init_progress_reporter.next_stage()
        # Create a whitelist usable by the stamp checks
        self.stampfnwhitelist = {}
        for mc in self.taskData:
            self.stampfnwhitelist[mc] = []
            for entry in self.stampwhitelist.split():
                if entry not in self.taskData[mc].build_targets:
                    continue
                fn = self.taskData[mc].build_targets[entry][0]
                self.stampfnwhitelist[mc].append(fn)

        self.init_progress_reporter.next_stage()
        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene_tids = []
        if not self.cooker.configuration.nosetscene:
            for tid in self.runtaskentries:
                (mc, fn, taskname, _) = split_tid_mcfn(tid)
                setscenetid = tid + "_setscene"
                if setscenetid not in taskData[mc].taskentries:
                    continue
                self.runq_setscene_tids.append(tid)

        def invalidate_task(tid, error_nostamp):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)

        self.init_progress_reporter.next_stage()
        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for tid in self.target_tids:
                invalidate_task(tid, False)

        # Invalidate task if invalidate mode active
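        # (invalidate_stamp holds a comma-separated task list; e.g. a hypothetical
        # "bitbake -C compile,install <target>" invocation would invalidate
        # do_compile and do_install for each target before the build starts)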
        if self.cooker.configuration.invalidate_stamp:
            for tid in self.target_tids:
                fn = fn_from_tid(tid)
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    if not st.startswith("do_"):
                        st = "do_%s" % st
                    invalidate_task(fn + ":" + st, True)

        self.init_progress_reporter.next_stage()
        # Create and print to the logs a virtual/xxxx -> PN (fn) table
        for mc in taskData:
            virtmap = taskData[mc].get_providermap(prefix="virtual/")
            virtpnmap = {}
            for v in virtmap:
                virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
                bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
            if hasattr(bb.parse.siggen, "tasks_resolved"):
                bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])

        self.init_progress_reporter.next_stage()
        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(self.runtaskentries)
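        # A task's hash incorporates the hashes of everything it depends on, so the
        # loop below only hashes a task once all of its dependencies have been dealt
        # with, which amounts to an incremental topological sweep of the run queue.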
        while len(todeal) > 0:
            for tid in todeal.copy():
                if len(self.runtaskentries[tid].depends - dealtwith) == 0:
                    dealtwith.add(tid)
                    todeal.remove(tid)
                    procdep = []
                    for dep in self.runtaskentries[tid].depends:
                        procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
                    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                    self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
                    task = self.runtaskentries[tid].task

        bb.parse.siggen.writeout_file_checksum_cache()

        #self.dump_data()
        return len(self.runtaskentries)
    def dump_data(self):
        """
        Dump some debug information on the internal data structures
        """
        logger.debug(3, "run_tasks:")
        for tid in self.runtaskentries:
            logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
                         self.runtaskentries[tid].weight,
                         self.runtaskentries[tid].depends,
                         self.runtaskentries[tid].revdeps)
class RunQueueWorker():
    def __init__(self, process, pipe):
        self.process = process
        self.pipe = pipe
class RunQueue:
    def __init__(self, cooker, cfgData, dataCaches, taskData, targets):

        self.cooker = cooker
        self.cfgData = cfgData
        self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)

        self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
        self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
        self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
        self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None

        self.state = runQueuePrepare

        # For disk space monitor
        # Invoked at regular time intervals via the bitbake heartbeat event
        # while the build is running. We generate a unique name for the handler
        # here, just in case that there ever is more than one RunQueue instance,
        # start the handler when reaching runQueueSceneRun, and stop it when
        # done with the build.
        self.dm = monitordisk.diskMonitor(cfgData)
        self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
        self.dm_event_handler_registered = False
        self.rqexe = None
        self.worker = {}
        self.fakeworker = {}
    def _start_worker(self, mc, fakeroot = False, rqexec = None):
        logger.debug(1, "Starting bitbake-worker")
        magic = "decafbad"
        if self.cooker.configuration.profile:
            magic = "decafbadbad"
        if fakeroot:
            magic = magic + "beef"
            mcdata = self.cooker.databuilder.mcdata[mc]
            fakerootcmd = mcdata.getVar("FAKEROOTCMD")
            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
            env = os.environ.copy()
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)

        runqhash = {}
        for tid in self.rqdata.runtaskentries:
            runqhash[tid] = self.rqdata.runtaskentries[tid].hash

        workerdata = {
            "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
            "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
            "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
            "sigdata" : bb.parse.siggen.get_taskdata(),
            "runq_hash" : runqhash,
            "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME"),
            "date" : self.cfgData.getVar("DATE"),
            "time" : self.cfgData.getVar("TIME"),
        }
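        # The handshake with bitbake-worker is a simple framed protocol over the
        # worker's stdin: each pickled payload is wrapped in matching start/end tags
        # (e.g. <workerdata>...</workerdata>) so the worker can find the message
        # boundaries in the byte stream; _teardown_worker later sends <quit></quit>
        # over the same channel.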
        worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
        worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
        worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
        worker.stdin.flush()

        return RunQueueWorker(worker, workerpipe)
    def _teardown_worker(self, worker):
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        try:
            worker.process.stdin.write(b"<quit></quit>")
            worker.process.stdin.flush()
            worker.process.stdin.close()
        except IOError:
            pass
        while worker.process.returncode is None:
            worker.pipe.read()
            worker.process.poll()
        while worker.pipe.read():
            continue
        worker.pipe.close()
    def start_worker(self):
        if self.worker:
            self.teardown_workers()
        self.teardown = False
        for mc in self.rqdata.dataCaches:
            self.worker[mc] = self._start_worker(mc)

    def start_fakeworker(self, rqexec, mc):
        if not mc in self.fakeworker:
            self.fakeworker[mc] = self._start_worker(mc, True, rqexec)

    def teardown_workers(self):
        self.teardown = True
        for mc in self.worker:
            self._teardown_worker(self.worker[mc])
        self.worker = {}
        for mc in self.fakeworker:
            self._teardown_worker(self.fakeworker[mc])
        self.fakeworker = {}

    def read_workers(self):
        for mc in self.worker:
            self.worker[mc].pipe.read()
        for mc in self.fakeworker:
            self.fakeworker[mc].pipe.read()

    def active_fds(self):
        fds = []
        for mc in self.worker:
            fds.append(self.worker[mc].pipe.input)
        for mc in self.fakeworker:
            fds.append(self.fakeworker[mc].pipe.input)
        return fds
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001198 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001199 def get_timestamp(f):
1200 try:
1201 if not os.access(f, os.F_OK):
1202 return None
1203 return os.stat(f)[stat.ST_MTIME]
1204 except:
1205 return None
1206
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001207 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1208 if taskname is None:
1209 taskname = tn
1210
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001211 if self.stamppolicy == "perfile":
1212 fulldeptree = False
1213 else:
1214 fulldeptree = True
1215 stampwhitelist = []
1216 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001217 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001219 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001220
1221 # If the stamp is missing, it's not current
1222 if not os.access(stampfile, os.F_OK):
1223 logger.debug(2, "Stampfile %s not available", stampfile)
1224 return False
1225 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001226 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001227 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1228 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1229 return False
1230
1231 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1232 return True
1233
1234 if cache is None:
1235 cache = {}
1236
1237 iscurrent = True
1238 t1 = get_timestamp(stampfile)
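# Illustrative comparison, assuming simple mtimes: with our stamp t1 = 1000,
# a dependency stamp t2 = 900 leaves this task current, while t2 = 1100 makes
# it stale (t1 < t2). A dependency whose _setscene stamp t3 is newer than its
# normal stamp (or present when t2 is missing) is treated as satisfied.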
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001239 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001240 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001241 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1242 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1243 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001244 t2 = get_timestamp(stampfile2)
1245 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001246 if t3 and not t2:
1247 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001248 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001249 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001250 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1251 if not t2:
1252 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1253 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001254 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001255 if t1 < t2:
1256 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1257 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001258 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001259 if recurse and iscurrent:
1260 if dep in cache:
1261 iscurrent = cache[dep]
1262 if not iscurrent:
1263 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1264 else:
1265 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1266 cache[dep] = iscurrent
1267 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001268 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001269 return iscurrent
1270
1271 def _execute_runqueue(self):
1272 """
1273 Run the tasks in a queue prepared by rqdata.prepare()
1274 Upon failure, optionally try to recover the build using any alternate providers
1275 (if the abort on failure configuration option isn't set)
1276 """
1277
1278 retval = True
1279
1280 if self.state is runQueuePrepare:
1281 self.rqexe = RunQueueExecuteDummy(self)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001282 # NOTE: if you add, remove or significantly refactor the stages of this
1283 # process then you should recalculate the weightings here. This is quite
1284 # easy to do - just change the next line temporarily to pass debug=True as
1285 # the last parameter and you'll get a printout of the weightings as well
1286 # as a map to the lines where next_stage() was called. Of course this isn't
1287 # critical, but it helps to keep the progress reporting accurate.
1288 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1289 "Initialising tasks",
1290 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001291 if self.rqdata.prepare() == 0:
1292 self.state = runQueueComplete
1293 else:
1294 self.state = runQueueSceneInit
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001295 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001296
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001297 # We are ready to run; emit dependency info to any UI or class which
1298 # needs it.
1299 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1300 self.rqdata.init_progress_reporter.next_stage()
1301 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001302
1303 if self.state is runQueueSceneInit:
1304 dump = self.cooker.configuration.dump_signatures
1305 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001306 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001307 if 'printdiff' in dump:
1308 invalidtasks = self.print_diffscenetasks()
1309 self.dump_signatures(dump)
1310 if 'printdiff' in dump:
1311 self.write_diffscenetasks(invalidtasks)
1312 self.state = runQueueComplete
1313 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001314 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001315 self.start_worker()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001316 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001317 self.rqexe = RunQueueExecuteScenequeue(self)
1318
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001319 if self.state is runQueueSceneRun:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001320 if not self.dm_event_handler_registered:
1321 res = bb.event.register(self.dm_event_handler_name,
1322 lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
1323 ('bb.event.HeartbeatEvent',))
1324 self.dm_event_handler_registered = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001325 retval = self.rqexe.execute()
1326
1327 if self.state is runQueueRunInit:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001328 if self.cooker.configuration.setsceneonly:
1329 self.state = runQueueComplete
1330 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001331 # Just in case we didn't setscene
1332 self.rqdata.init_progress_reporter.finish()
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001333 logger.info("Executing RunQueue Tasks")
1334 self.rqexe = RunQueueExecuteTasks(self)
1335 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001336
1337 if self.state is runQueueRunning:
1338 retval = self.rqexe.execute()
1339
1340 if self.state is runQueueCleanUp:
1341 retval = self.rqexe.finish()
1342
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001343 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1344
1345 if build_done and self.dm_event_handler_registered:
1346 bb.event.remove(self.dm_event_handler_name, None)
1347 self.dm_event_handler_registered = False
1348
1349 if build_done and self.rqexe:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001350 self.teardown_workers()
1351 if self.rqexe.stats.failed:
1352 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1353 else:
1354 # Let's avoid the word "failed" if nothing actually did
1355 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
1356
1357 if self.state is runQueueFailed:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001358 if not self.rqdata.taskData[''].tryaltconfigs:
1359 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
1360 for tid in self.rqexe.failed_tids:
1361 (mc, fn, tn, _) = split_tid_mcfn(tid)
1362 self.rqdata.taskData[mc].fail_fn(fn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001363 self.rqdata.reset()
1364
1365 if self.state is runQueueComplete:
1366 # All done
1367 return False
1368
1369 # Loop
1370 return retval
1371
1372 def execute_runqueue(self):
1373 # Catch unexpected exceptions and ensure we exit when an error occurs rather than looping forever.
1374 try:
1375 return self._execute_runqueue()
1376 except bb.runqueue.TaskFailure:
1377 raise
1378 except SystemExit:
1379 raise
1380 except bb.BBHandledException:
1381 try:
1382 self.teardown_workers()
1383 except:
1384 pass
1385 self.state = runQueueComplete
1386 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001387 except Exception as err:
1388 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001389 try:
1390 self.teardown_workers()
1391 except:
1392 pass
1393 self.state = runQueueComplete
1394 raise
1395
1396 def finish_runqueue(self, now = False):
1397 if not self.rqexe:
1398 self.state = runQueueComplete
1399 return
1400
1401 if now:
1402 self.rqexe.finish_now()
1403 else:
1404 self.rqexe.finish()
1405
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001406 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001407 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001408 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1409 siggen = bb.parse.siggen
1410 dataCaches = self.rqdata.dataCaches
1411 siggen.dump_sigfn(fn, dataCaches, options)
1412
1413 def dump_signatures(self, options):
1414 fns = set()
1415 bb.note("Reparsing files to collect dependency data")
1416
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001417 for tid in self.rqdata.runtaskentries:
1418 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001419 fns.add(fn)
1420
1421 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1422 # We cannot use the real multiprocessing.Pool easily due to some local data
1423 # that can't be pickled. This is a cheap multi-process solution.
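# Sketch of the throttle below: keep at most max_process Process objects in
# flight, reap finished ones via is_alive() (which joins exited children),
# then launch the next file from the pending set and join any stragglers.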
1424 launched = []
1425 while fns:
1426 if len(launched) < max_process:
1427 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1428 p.start()
1429 launched.append(p)
1430 for q in launched:
1431 # The finished processes are joined when calling is_alive()
1432 if not q.is_alive():
1433 launched.remove(q)
1434 for p in launched:
1435 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001436
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001437 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001438
1439 return
1440
1441 def print_diffscenetasks(self):
1442
1443 valid = []
1444 sq_hash = []
1445 sq_hashfn = []
1446 sq_fn = []
1447 sq_taskname = []
1448 sq_task = []
1449 noexec = []
1450 stamppresent = []
1451 valid_new = set()
1452
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001453 for tid in self.rqdata.runtaskentries:
1454 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1455 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001456
1457 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001458 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001459 continue
1460
1461 sq_fn.append(fn)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001462 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001463 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001464 sq_taskname.append(taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001465 sq_task.append(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001466 locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001467 try:
1468 call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
1469 valid = bb.utils.better_eval(call, locs)
1470 # Handle version with no siginfo parameter
1471 except TypeError:
1472 call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
1473 valid = bb.utils.better_eval(call, locs)
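# 'valid' is a list of indices into sq_task; each index names a setscene task
# whose signature the hash-validation hook (self.hashvalidate, normally the
# metadata's BB_HASHCHECK_FUNCTION) believes can be satisfied from existing
# sstate/siginfo data.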
1474 for v in valid:
1475 valid_new.add(sq_task[v])
1476
1477 # Tasks which are both setscene and noexec never care about dependencies
1478 # We therefore find tasks which are setscene and noexec and mark their
1479 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001480 for tid in noexec:
1481 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001482 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001483 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001484 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001485 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1486 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001487 continue
1488 hasnoexecparents = False
1489 break
1490 if hasnoexecparents:
1491 valid_new.add(dep)
1492
1493 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001494 for tid in self.rqdata.runtaskentries:
1495 if tid not in valid_new and tid not in noexec:
1496 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001497
1498 found = set()
1499 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001500 for tid in invalidtasks:
1501 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001502 while toprocess:
1503 next = set()
1504 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001505 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001506 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001507 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001508 if dep not in processed:
1509 processed.add(dep)
1510 next.add(dep)
1511 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001512 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001513 toprocess = set()
1514
1515 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001516 for tid in invalidtasks.difference(found):
1517 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001518
1519 if tasklist:
1520 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1521
1522 return invalidtasks.difference(found)
1523
1524 def write_diffscenetasks(self, invalidtasks):
1525
1526 # Define recursion callback
1527 def recursecb(key, hash1, hash2):
1528 hashes = [hash1, hash2]
1529 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1530
1531 recout = []
1532 if len(hashfiles) == 2:
1533 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
1534 recout.extend(list(' ' + l for l in out2))
1535 else:
1536 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1537
1538 return recout
1539
1540
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001541 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001542 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1543 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001544 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001545 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1546 match = None
1547 for m in matches:
1548 if h in m:
1549 match = m
1550 if match is None:
1551 bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001552 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001553 if matches:
1554 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
1555 prevh = __find_md5__.search(latestmatch).group(0)
1556 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1557 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1558
1559class RunQueueExecute:
1560
1561 def __init__(self, rq):
1562 self.rq = rq
1563 self.cooker = rq.cooker
1564 self.cfgData = rq.cfgData
1565 self.rqdata = rq.rqdata
1566
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001567 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1568 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001569
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001570 self.runq_buildable = set()
1571 self.runq_running = set()
1572 self.runq_complete = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001573
1574 self.build_stamps = {}
1575 self.build_stamps2 = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001576 self.failed_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001577
1578 self.stampcache = {}
1579
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001580 for mc in rq.worker:
1581 rq.worker[mc].pipe.setrunqueueexec(self)
1582 for mc in rq.fakeworker:
1583 rq.fakeworker[mc].pipe.setrunqueueexec(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001584
1585 if self.number_tasks <= 0:
1586 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1587
1588 def runqueue_process_waitpid(self, task, status):
1589
1590 # self.build_stamps[task] may not exist when using a shared work directory.
1591 if task in self.build_stamps:
1592 self.build_stamps2.remove(self.build_stamps[task])
1593 del self.build_stamps[task]
1594
1595 if status != 0:
1596 self.task_fail(task, status)
1597 else:
1598 self.task_complete(task)
1599 return True
1600
1601 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001602 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001603 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001604 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1605 self.rq.worker[mc].process.stdin.flush()
1606 except IOError:
1607 # worker must have died?
1608 pass
1609 for mc in self.rq.fakeworker:
1610 try:
1611 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1612 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001613 except IOError:
1614 # worker must have died?
1615 pass
1616
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001617 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001618 self.rq.state = runQueueFailed
1619 return
1620
1621 self.rq.state = runQueueComplete
1622 return
1623
1624 def finish(self):
1625 self.rq.state = runQueueCleanUp
1626
1627 if self.stats.active > 0:
1628 bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
1629 self.rq.read_workers()
1630 return self.rq.active_fds()
1631
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001632 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001633 self.rq.state = runQueueFailed
1634 return True
1635
1636 self.rq.state = runQueueComplete
1637 return True
1638
1639 def check_dependencies(self, task, taskdeps, setscene = False):
1640 if not self.rq.depvalidate:
1641 return False
1642
1643 taskdata = {}
1644 taskdeps.add(task)
1645 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001646 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1647 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001648 taskdata[dep] = [pn, taskname, fn]
1649 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001650 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001651 valid = bb.utils.better_eval(call, locs)
1652 return valid
1653
1654class RunQueueExecuteDummy(RunQueueExecute):
1655 def __init__(self, rq):
1656 self.rq = rq
1657 self.stats = RunQueueStats(0)
1658
1659 def finish(self):
1660 self.rq.state = runQueueComplete
1661 return
1662
1663class RunQueueExecuteTasks(RunQueueExecute):
1664 def __init__(self, rq):
1665 RunQueueExecute.__init__(self, rq)
1666
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001667 self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001668
1669 self.stampcache = {}
1670
1671 initial_covered = self.rq.scenequeue_covered.copy()
1672
1673 # Mark initial buildable tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001674 for tid in self.rqdata.runtaskentries:
1675 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1676 self.runq_buildable.add(tid)
1677 if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
1678 self.rq.scenequeue_covered.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001679
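# Propagate coverage to a fixed point: a task whose reverse dependencies are
# all already covered by the scenequeue (and which was not explicitly marked
# notcovered) can itself be skipped, so keep sweeping until a pass adds
# nothing new.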
1680 found = True
1681 while found:
1682 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001683 for tid in self.rqdata.runtaskentries:
1684 if tid in self.rq.scenequeue_covered:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001685 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001686 logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001687
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001688 if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
1689 if tid in self.rq.scenequeue_notcovered:
1690 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001691 found = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001692 self.rq.scenequeue_covered.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001693
1694 logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
1695
1696 # Allow the metadata to elect for setscene tasks to run anyway
1697 covered_remove = set()
1698 if self.rq.setsceneverify:
1699 invalidtasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001700 tasknames = {}
1701 fns = {}
1702 for tid in self.rqdata.runtaskentries:
1703 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1704 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
1705 fns[tid] = taskfn
1706 tasknames[tid] = taskname
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001707 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1708 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001709 if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
1710 logger.debug(2, 'Setscene stamp current for task %s', tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001711 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001712 if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
1713 logger.debug(2, 'Normal stamp current for task %s', tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001714 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001715 invalidtasks.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001716
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001717 call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001718 locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001719 covered_remove = bb.utils.better_eval(call, locs)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001720
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001721 def removecoveredtask(tid):
1722 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1723 taskname = taskname + '_setscene'
1724 bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
1725 self.rq.scenequeue_covered.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001726
1727 toremove = covered_remove
1728 for task in toremove:
1729 logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
1730 while toremove:
1731 covered_remove = []
1732 for task in toremove:
1733 removecoveredtask(task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001734 for deptask in self.rqdata.runtaskentries[task].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001735 if deptask not in self.rq.scenequeue_covered:
1736 continue
1737 if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
1738 continue
1739 logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
1740 covered_remove.append(deptask)
1741 toremove = covered_remove
1742
1743 logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
1744
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001745
1746 for mc in self.rqdata.dataCaches:
1747 target_pairs = []
1748 for tid in self.rqdata.target_tids:
1749 (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
1750 if tidmc == mc:
1751 target_pairs.append((fn, taskname))
1752
1753 event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001754
1755 schedulers = self.get_schedulers()
1756 for scheduler in schedulers:
1757 if self.scheduler == scheduler.name:
1758 self.sched = scheduler(self, self.rqdata)
1759 logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
1760 break
1761 else:
1762 bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
1763 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1764
1765 def get_schedulers(self):
1766 schedulers = set(obj for obj in globals().values()
1767 if type(obj) is type and
1768 issubclass(obj, RunQueueScheduler))
1769
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001770 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
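# BB_SCHEDULERS, when set, is a space-separated list of dotted names, e.g.
# (hypothetical) BB_SCHEDULERS = "mylayer.sched.MyScheduler"; each entry is
# split on the last '.' into a module to import and a class name to add.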
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001771 if user_schedulers:
1772 for sched in user_schedulers.split():
1773 if not "." in sched:
1774 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1775 continue
1776
1777 modname, name = sched.rsplit(".", 1)
1778 try:
1779 module = __import__(modname, fromlist=(name,))
1780 except ImportError as exc:
1781 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1782 raise SystemExit(1)
1783 else:
1784 schedulers.add(getattr(module, name))
1785 return schedulers
1786
1787 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001788 self.runq_buildable.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001789 self.sched.newbuilable(task)
1790
1791 def task_completeoutright(self, task):
1792 """
1793 Mark a task as completed
1794 Look at the reverse dependencies and mark any task with
1795 completed dependencies as buildable
1796 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001797 self.runq_complete.add(task)
1798 for revdep in self.rqdata.runtaskentries[task].revdeps:
1799 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001800 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001801 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001802 continue
1803 alldeps = 1
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001804 for dep in self.rqdata.runtaskentries[revdep].depends:
1805 if dep not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001806 alldeps = 0
1807 if alldeps == 1:
1808 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001809 fn = fn_from_tid(revdep)
1810 taskname = taskname_from_tid(revdep)
1811 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001812
1813 def task_complete(self, task):
1814 self.stats.taskCompleted()
1815 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1816 self.task_completeoutright(task)
1817
1818 def task_fail(self, task, exitcode):
1819 """
1820 Called when a task has failed
1821 Updates the state engine with the failure
1822 """
1823 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001824 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001825 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001826 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001827 self.rq.state = runQueueCleanUp
1828
1829 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001830 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001831 self.setbuildable(task)
1832 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1833 self.task_completeoutright(task)
1834 self.stats.taskCompleted()
1835 self.stats.taskSkipped()
1836
1837 def execute(self):
1838 """
1839 Run the tasks in a queue prepared by rqdata.prepare()
1840 """
1841
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001842 if self.rqdata.setscenewhitelist and not self.rqdata.setscenewhitelist_checked:
1843 self.rqdata.setscenewhitelist_checked = True
1844
1845 # Check tasks that are going to run against the whitelist
1846 def check_norun_task(tid, showerror=False):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001847 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001848 # Ignore covered tasks
1849 if tid in self.rq.scenequeue_covered:
1850 return False
1851 # Ignore stamped tasks
1852 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
1853 return False
1854 # Ignore noexec tasks
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001855 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001856 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1857 return False
1858
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001859 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001860 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
1861 if showerror:
1862 if tid in self.rqdata.runq_setscene_tids:
1863 logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
1864 else:
1865 logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
1866 return True
1867 return False
1868 # Look to see if any tasks that we think shouldn't run are actually going to run
1869 unexpected = False
1870 for tid in self.rqdata.runtaskentries:
1871 if check_norun_task(tid):
1872 unexpected = True
1873 break
1874 if unexpected:
1875 # Run through the tasks in the rough order they'd have executed and print errors
1876 # (since the order can be useful - usually missing sstate for the last few tasks
1877 # is the cause of the problem)
1878 task = self.sched.next()
1879 while task is not None:
1880 check_norun_task(task, showerror=True)
1881 self.task_skip(task, 'Setscene enforcement check')
1882 task = self.sched.next()
1883
1884 self.rq.state = runQueueCleanUp
1885 return True
1886
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001887 self.rq.read_workers()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001888
1889 if self.stats.total == 0:
1890 # nothing to do
1891 self.rq.state = runQueueCleanUp
1892
1893 task = self.sched.next()
1894 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001895 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001896
1897 if task in self.rq.scenequeue_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001898 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001899 self.task_skip(task, "covered")
1900 return True
1901
1902 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001903 logger.debug(2, "Stamp current for task %s", task)
1904
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001905 self.task_skip(task, "existing")
1906 return True
1907
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001908 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001909 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1910 startevent = runQueueTaskStarted(task, self.stats, self.rq,
1911 noexec=True)
1912 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001913 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001914 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001915 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001916 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001917 self.task_complete(task)
1918 return True
1919 else:
1920 startevent = runQueueTaskStarted(task, self.stats, self.rq)
1921 bb.event.fire(startevent, self.cfgData)
1922
1923 taskdepdata = self.build_taskdepdata(task)
1924
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001925 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001926 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001927 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001928 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001929 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001930 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001931 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001932 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001933 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001934 return True
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001935 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(fn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001936 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001937 else:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001938 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001939 self.rq.worker[mc].process.stdin.flush()
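# The <runtask> message written above is a pickled tuple of roughly
# (taskfn, tid, taskname, is_setscene=False, file appends, taskdepdata,
# setscene_enforce), framed in <runtask></runtask> tags just like the
# initial workerdata handshake.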
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001940
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001941 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
1942 self.build_stamps2.append(self.build_stamps[task])
1943 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001944 self.stats.taskActive()
1945 if self.stats.active < self.number_tasks:
1946 return True
1947
1948 if self.stats.active > 0:
1949 self.rq.read_workers()
1950 return self.rq.active_fds()
1951
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001952 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001953 self.rq.state = runQueueFailed
1954 return True
1955
1956 # Sanity Checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001957 for task in self.rqdata.runtaskentries:
1958 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001959 logger.error("Task %s never buildable!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001960 if task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001961 logger.error("Task %s never ran!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001962 if task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001963 logger.error("Task %s never completed!", task)
1964 self.rq.state = runQueueComplete
1965
1966 return True
1967
1968 def build_taskdepdata(self, task):
1969 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001970 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001971 next.add(task)
1972 while next:
1973 additional = []
1974 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001975 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
1976 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
1977 deps = self.rqdata.runtaskentries[revdep].depends
1978 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001979 taskhash = self.rqdata.runtaskentries[revdep].hash
1980 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
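# Each entry is a list of the form [pn, taskname, fn, depends, provides,
# taskhash], e.g. (illustrative) ["zlib", "do_compile", ".../zlib_1.2.11.bb",
# {...}, ["zlib"], "<taskhash>"]; this structure is what gets pickled into
# the <runtask> message handed to the worker.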
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001981 for revdep2 in deps:
1982 if revdep2 not in taskdepdata:
1983 additional.append(revdep2)
1984 next = additional
1985
1986 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
1987 return taskdepdata
1988
1989class RunQueueExecuteScenequeue(RunQueueExecute):
1990 def __init__(self, rq):
1991 RunQueueExecute.__init__(self, rq)
1992
1993 self.scenequeue_covered = set()
1994 self.scenequeue_notcovered = set()
1995 self.scenequeue_notneeded = set()
1996
1997 # If we don't have any setscene functions, skip this step
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001998 if len(self.rqdata.runq_setscene_tids) == 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001999 rq.scenequeue_covered = set()
2000 rq.state = runQueueRunInit
2001 return
2002
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002003 self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002004
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002005 sq_revdeps = {}
2006 sq_revdeps_new = {}
2007 sq_revdeps_squash = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002008 self.sq_harddeps = {}
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002009 self.stamps = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002010
2011 # We need to construct a dependency graph for the setscene functions. Intermediate
2012 # dependencies between the setscene tasks only complicate the code. This code
2013 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2014 # only containing the setscene functions.
2015
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002016 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002017
2018 # First process the chains up to the first setscene task.
2019 endpoints = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002020 for tid in self.rqdata.runtaskentries:
2021 sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
2022 sq_revdeps_new[tid] = set()
2023 if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
2024 #bb.warn("Added endpoint %s" % (tid))
2025 endpoints[tid] = set()
2026
2027 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002028
2029 # Secondly process the chains between setscene tasks.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002030 for tid in self.rqdata.runq_setscene_tids:
2031 #bb.warn("Added endpoint 2 %s" % (tid))
2032 for dep in self.rqdata.runtaskentries[tid].depends:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002033 if tid in sq_revdeps[dep]:
2034 sq_revdeps[dep].remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002035 if dep not in endpoints:
2036 endpoints[dep] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002037 #bb.warn(" Added endpoint 3 %s" % (dep))
2038 endpoints[dep].add(tid)
2039
2040 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002041
2042 def process_endpoints(endpoints):
2043 newendpoints = {}
2044 for point, task in endpoints.items():
2045 tasks = set()
2046 if task:
2047 tasks |= task
2048 if sq_revdeps_new[point]:
2049 tasks |= sq_revdeps_new[point]
2050 sq_revdeps_new[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002051 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002052 sq_revdeps_new[point] = tasks
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05002053 tasks = set()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002054 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002055 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002056 if point in sq_revdeps[dep]:
2057 sq_revdeps[dep].remove(point)
2058 if tasks:
2059 sq_revdeps_new[dep] |= tasks
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002060 if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002061 newendpoints[dep] = task
2062 if len(newendpoints) != 0:
2063 process_endpoints(newendpoints)
2064
2065 process_endpoints(endpoints)
2066
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002067 self.rqdata.init_progress_reporter.next_stage()
2068
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002069 # Build a list of setscene tasks which are "unskippable"
2070 # These are direct endpoints referenced by the build
2071 endpoints2 = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002072 sq_revdeps2 = {}
2073 sq_revdeps_new2 = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002074 def process_endpoints2(endpoints):
2075 newendpoints = {}
2076 for point, task in endpoints.items():
2077 tasks = set([point])
2078 if task:
2079 tasks |= task
2080 if sq_revdeps_new2[point]:
2081 tasks |= sq_revdeps_new2[point]
2082 sq_revdeps_new2[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002083 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002084 sq_revdeps_new2[point] = tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002085 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002086 if point in sq_revdeps2[dep]:
2087 sq_revdeps2[dep].remove(point)
2088 if tasks:
2089 sq_revdeps_new2[dep] |= tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002090 if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002091 newendpoints[dep] = tasks
2092 if len(newendpoints) != 0:
2093 process_endpoints2(newendpoints)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002094 for tid in self.rqdata.runtaskentries:
2095 sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
2096 sq_revdeps_new2[tid] = set()
2097 if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
2098 endpoints2[tid] = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002099 process_endpoints2(endpoints2)
2100 self.unskippable = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002101 for tid in self.rqdata.runq_setscene_tids:
2102 if sq_revdeps_new2[tid]:
2103 self.unskippable.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002104
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002105 self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
2106
2107 for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
2108 if tid in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002109 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002110 for dep in sq_revdeps_new[tid]:
2111 deps.add(dep)
2112 sq_revdeps_squash[tid] = deps
2113 elif len(sq_revdeps_new[tid]) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002114 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002115 self.rqdata.init_progress_reporter.update(taskcounter)
2116
2117 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002118
2119 # Resolve setscene inter-task dependencies
2120 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2121 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002122 for tid in self.rqdata.runq_setscene_tids:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002123 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2124 realtid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002125 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002126 self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002127 for (depname, idependtask) in idepends:
2128
2129 if depname not in self.rqdata.taskData[mc].build_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002130 continue
2131
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002132 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2133 if depfn is None:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002134 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002135 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2136 if deptid not in self.rqdata.runtaskentries:
2137 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002138
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002139 if not deptid in self.sq_harddeps:
2140 self.sq_harddeps[deptid] = set()
2141 self.sq_harddeps[deptid].add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002142
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002143 sq_revdeps_squash[tid].add(deptid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002144 # Have to zero this to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002145 sq_revdeps_squash[deptid] = set()
2146
2147 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002148
2149 for task in self.sq_harddeps:
2150 for dep in self.sq_harddeps[task]:
2151 sq_revdeps_squash[dep].add(task)
2152
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002153 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002154
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002155 #for tid in sq_revdeps_squash:
2156 # for dep in sq_revdeps_squash[tid]:
2157 # data = data + "\n %s" % dep
2158 # bb.warn("Task %s_setscene: is %s " % (tid, data
2159
2160 self.sq_deps = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002161 self.sq_revdeps = sq_revdeps_squash
2162 self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
2163
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002164 for tid in self.sq_revdeps:
2165 self.sq_deps[tid] = set()
2166 for tid in self.sq_revdeps:
2167 for dep in self.sq_revdeps[tid]:
2168 self.sq_deps[dep].add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002169
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002170 self.rqdata.init_progress_reporter.next_stage()
2171
2172 for tid in self.sq_revdeps:
2173 if len(self.sq_revdeps[tid]) == 0:
2174 self.runq_buildable.add(tid)
2175
2176 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002177
2178 self.outrightfail = []
2179 if self.rq.hashvalidate:
2180 sq_hash = []
2181 sq_hashfn = []
2182 sq_fn = []
2183 sq_taskname = []
2184 sq_task = []
2185 noexec = []
2186 stamppresent = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002187 for tid in self.sq_revdeps:
2188 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2189
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002190 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002191
2192 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002193 noexec.append(tid)
2194 self.task_skip(tid)
2195 bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002196 continue
2197
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002198 if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
2199 logger.debug(2, 'Setscene stamp current for task %s', tid)
2200 stamppresent.append(tid)
2201 self.task_skip(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002202 continue
2203
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002204 if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
2205 logger.debug(2, 'Normal stamp current for task %s', tid)
2206 stamppresent.append(tid)
2207 self.task_skip(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002208 continue
2209
2210 sq_fn.append(fn)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002211 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002212 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002213 sq_taskname.append(taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002214 sq_task.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002215 call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002216 locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002217 valid = bb.utils.better_eval(call, locs)
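# As in print_diffscenetasks(), 'valid' holds indices into sq_task naming the
# setscene tasks the hash-validation hook reports as available; anything not
# ending up in valid_new (and not noexec) is queued for outright failure below.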
2218
2219 valid_new = stamppresent
2220 for v in valid:
2221 valid_new.append(sq_task[v])
2222
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002223 for tid in self.sq_revdeps:
2224 if tid not in valid_new and tid not in noexec:
2225 logger.debug(2, 'No package found, so skipping setscene task %s', tid)
2226 self.outrightfail.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002227
2228 logger.info('Executing SetScene Tasks')
2229
2230 self.rq.state = runQueueSceneRun
2231
2232 def scenequeue_updatecounters(self, task, fail = False):
2233 for dep in self.sq_deps[task]:
2234 if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002235 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002236 self.scenequeue_updatecounters(dep, fail)
2237 continue
2238 if task not in self.sq_revdeps2[dep]:
2239 # May already have been removed by the fail case above
2240 continue
2241 self.sq_revdeps2[dep].remove(task)
2242 if len(self.sq_revdeps2[dep]) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002243 self.runq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002244
2245 def task_completeoutright(self, task):
2246 """
2247 Mark a task as completed
2248 Look at the reverse dependencies and mark any task with
2249 completed dependencies as buildable
2250 """
2251
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002252 logger.debug(1, 'Found task %s which could be accelerated', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002253 self.scenequeue_covered.add(task)
2254 self.scenequeue_updatecounters(task)
2255
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002256 def check_taskfail(self, task):
2257 if self.rqdata.setscenewhitelist:
2258 realtask = task.split('_setscene')[0]
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002259 (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
2260 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002261 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2262 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2263 self.rq.state = runQueueCleanUp
2264
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002265 def task_complete(self, task):
2266 self.stats.taskCompleted()
2267 bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
2268 self.task_completeoutright(task)
2269
2270 def task_fail(self, task, result):
2271 self.stats.taskFailed()
2272 bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
2273 self.scenequeue_notcovered.add(task)
2274 self.scenequeue_updatecounters(task, True)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002275 self.check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002276
2277 def task_failoutright(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002278 self.runq_running.add(task)
2279 self.runq_buildable.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002280 self.stats.taskCompleted()
2281 self.stats.taskSkipped()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002282 self.scenequeue_notcovered.add(task)
2283 self.scenequeue_updatecounters(task, True)
2284
2285 def task_skip(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002286 self.runq_running.add(task)
2287 self.runq_buildable.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002288 self.task_completeoutright(task)
2289 self.stats.taskCompleted()
2290 self.stats.taskSkipped()
2291
2292 def execute(self):
2293 """
2294 Run the setscene tasks in the queue prepared by __init__ above
2295 """
2296
2297 self.rq.read_workers()
2298
2299 task = None
2300 if self.stats.active < self.number_tasks:
2301 # Find the next setscene to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002302 for nexttask in self.rqdata.runq_setscene_tids:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002303 if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002304 if nexttask in self.unskippable:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002305 logger.debug(2, "Setscene task %s is unskippable" % nexttask)
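# Skip heuristic (sketch): a candidate that is not unskippable, whose squashed
# reverse dependencies are all already covered, whose check_dependencies()
# hook agrees, and which is not an explicit build target gets skipped here
# rather than run as a setscene task.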
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002306 if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002307 fn = fn_from_tid(nexttask)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002308 foundtarget = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002309
2310 if nexttask in self.rqdata.target_tids:
2311 foundtarget = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002312 if not foundtarget:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002313 logger.debug(2, "Skipping setscene for task %s" % nexttask)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002314 self.task_skip(nexttask)
2315 self.scenequeue_notneeded.add(nexttask)
2316 return True
2317 if nexttask in self.outrightfail:
2318 self.task_failoutright(nexttask)
2319 return True
2320 task = nexttask
2321 break
2322 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002323 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
2324 taskname = taskname + "_setscene"
2325 if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
2326 logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002327 self.task_failoutright(task)
2328 return True
2329
2330 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002331 if task in self.rqdata.target_tids:
2332 self.task_failoutright(task)
2333 return True
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002334
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002335 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
2336 logger.debug(2, 'Setscene stamp is current for task %s, so skipping it and its dependencies', task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002337 self.task_skip(task)
2338 return True
2339
2340 startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
2341 bb.event.fire(startevent, self.cfgData)
2342
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002343 taskdepdata = self.build_taskdepdata(task)
2344
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002345 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
2346 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002347 if mc not in self.rq.fakeworker:
2348 self.rq.start_fakeworker(self, mc)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002349 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002350 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002351 else:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002352 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002353 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002354
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002355 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2356 self.build_stamps2.append(self.build_stamps[task])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002357 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002358 self.stats.taskActive()
2359 if self.stats.active < self.number_tasks:
2360 return True
2361
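        # Nothing new could be started; if tasks are still running, keep
        # servicing worker output, otherwise fall through and wrap up the
        # scene queue.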
2362 if self.stats.active > 0:
2363 self.rq.read_workers()
2364 return self.rq.active_fds()
2365
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002366 #for tid in self.sq_revdeps:
2367 # if tid not in self.runq_running:
2368 # buildable = tid in self.runq_buildable
2369 # revdeps = self.sq_revdeps[tid]
2370 # bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002371
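        # Every setscene task has been handled; hand the covered/not-covered
        # sets back to the runqueue so the main execution phase can skip the
        # covered tasks, then advance the state machine.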
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002372 self.rq.scenequeue_covered = self.scenequeue_covered
2373 self.rq.scenequeue_notcovered = self.scenequeue_notcovered
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002374
2375 logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))
2376
2377 self.rq.state = runQueueRunInit
2378
2379 completeevent = sceneQueueComplete(self.stats, self.rq)
2380 bb.event.fire(completeevent, self.cfgData)
2381
2382 return True
2383
2384 def runqueue_process_waitpid(self, task, status):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002385 RunQueueExecute.runqueue_process_waitpid(self, task, status)
2386
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002387
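    # Collect dependency metadata for a setscene task: its transitive setscene
    # idepends are resolved to real task ids and, for each, [pn, taskname, fn,
    # deps, provides, taskhash] is recorded. The result is included in the
    # <runtask> message sent to the worker.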
2388 def build_taskdepdata(self, task):
2389 def getsetscenedeps(tid):
2390 deps = set()
2391 (mc, fn, taskname, _) = split_tid_mcfn(tid)
2392 realtid = tid + "_setscene"
2393 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
2394 for (depname, idependtask) in idepends:
2395 if depname not in self.rqdata.taskData[mc].build_targets:
2396 continue
2397
2398 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2399 if depfn is None:
2400 continue
2401 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2402 deps.add(deptid)
2403 return deps
2404
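        # Breadth-first expansion over the setscene dependencies, starting from
        # the task itself.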
2405 taskdepdata = {}
2406 next = getsetscenedeps(task)
2407 next.add(task)
2408 while next:
2409 additional = []
2410 for revdep in next:
2411 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2412 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2413 deps = getsetscenedeps(revdep)
2414 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
2415 taskhash = self.rqdata.runtaskentries[revdep].hash
2416 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
2417 for revdep2 in deps:
2418 if revdep2 not in taskdepdata:
2419 additional.append(revdep2)
2420 next = additional
2421
2422 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2423 return taskdepdata
2424
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002425class TaskFailure(Exception):
2426 """
2427 Exception raised when a task in a runqueue fails
2428 """
2429 def __init__(self, x):
2430 self.args = x
2431
2432
2433class runQueueExitWait(bb.event.Event):
2434 """
2435 Event when waiting for task processes to exit
2436 """
2437
2438 def __init__(self, remain):
2439 self.remain = remain
2440 self.message = "Waiting for %s active tasks to finish" % remain
2441 bb.event.Event.__init__(self)
2442
2443class runQueueEvent(bb.event.Event):
2444 """
2445 Base runQueue event class
2446 """
2447 def __init__(self, task, stats, rq):
2448 self.taskid = task
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002449 self.taskstring = task
2450 self.taskname = taskname_from_tid(task)
2451 self.taskfile = fn_from_tid(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002452 self.taskhash = rq.rqdata.get_task_hash(task)
2453 self.stats = stats.copy()
2454 bb.event.Event.__init__(self)
2455
2456class sceneQueueEvent(runQueueEvent):
2457 """
2458 Base sceneQueue event class
2459 """
2460 def __init__(self, task, stats, rq, noexec=False):
2461 runQueueEvent.__init__(self, task, stats, rq)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002462 self.taskstring = task + "_setscene"
2463 self.taskname = taskname_from_tid(task) + "_setscene"
2464 self.taskfile = fn_from_tid(task)
2465 self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002466
2467class runQueueTaskStarted(runQueueEvent):
2468 """
2469 Event notifying a task was started
2470 """
2471 def __init__(self, task, stats, rq, noexec=False):
2472 runQueueEvent.__init__(self, task, stats, rq)
2473 self.noexec = noexec
2474
2475class sceneQueueTaskStarted(sceneQueueEvent):
2476 """
2477 Event notifying a setscene task was started
2478 """
2479 def __init__(self, task, stats, rq, noexec=False):
2480 sceneQueueEvent.__init__(self, task, stats, rq)
2481 self.noexec = noexec
2482
2483class runQueueTaskFailed(runQueueEvent):
2484 """
2485 Event notifying a task failed
2486 """
2487 def __init__(self, task, stats, exitcode, rq):
2488 runQueueEvent.__init__(self, task, stats, rq)
2489 self.exitcode = exitcode
2490
2491class sceneQueueTaskFailed(sceneQueueEvent):
2492 """
2493 Event notifying a setscene task failed
2494 """
2495 def __init__(self, task, stats, exitcode, rq):
2496 sceneQueueEvent.__init__(self, task, stats, rq)
2497 self.exitcode = exitcode
2498
2499class sceneQueueComplete(sceneQueueEvent):
2500 """
2501 Event when all the sceneQueue tasks are complete
2502 """
2503 def __init__(self, stats, rq):
2504 self.stats = stats.copy()
2505 bb.event.Event.__init__(self)
2506
2507class runQueueTaskCompleted(runQueueEvent):
2508 """
2509 Event notifying a task completed
2510 """
2511
2512class sceneQueueTaskCompleted(sceneQueueEvent):
2513 """
2514 Event notifying a setscene task completed
2515 """
2516
2517class runQueueTaskSkipped(runQueueEvent):
2518 """
2519 Event notifying a task was skipped
2520 """
2521 def __init__(self, task, stats, rq, reason):
2522 runQueueEvent.__init__(self, task, stats, rq)
2523 self.reason = reason
2524
2525class runQueuePipe():
2526 """
2527 Abstraction for a pipe between a worker thread and the server
2528 """
2529 def __init__(self, pipein, pipeout, d, rq, rqexec):
2530 self.input = pipein
2531 if pipeout:
2532 pipeout.close()
2533 bb.utils.nonblockingfd(self.input)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002534 self.queue = b""
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002535 self.d = d
2536 self.rq = rq
2537 self.rqexec = rqexec
2538
2539 def setrunqueueexec(self, rqexec):
2540 self.rqexec = rqexec
2541
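    # Drain the worker's output pipe. The worker sends pickled payloads framed
    # as <event>...</event> (bb events re-fired on the server side) and
    # <exitcode>...</exitcode> ((task, exit status) pairs handed to
    # runqueue_process_waitpid). Workers that exited unexpectedly are also
    # detected here and trigger a shutdown of the runqueue.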
2542 def read(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002543 for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
2544 for worker in workers.values():
2545 worker.process.poll()
2546 if worker.process.returncode is not None and not self.rq.teardown:
2547 bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
2548 self.rq.finish_runqueue(True)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002549
2550 start = len(self.queue)
2551 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002552 self.queue = self.queue + (self.input.read(102400) or b"")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002553 except (OSError, IOError) as e:
2554 if e.errno != errno.EAGAIN:
2555 raise
2556 end = len(self.queue)
2557 found = True
2558 while found and len(self.queue):
2559 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002560 index = self.queue.find(b"</event>")
2561 while index != -1 and self.queue.startswith(b"<event>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002562 try:
2563 event = pickle.loads(self.queue[7:index])
2564 except ValueError as e:
2565 bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[7:index]))
2566 bb.event.fire_from_worker(event, self.d)
2567 found = True
2568 self.queue = self.queue[index+8:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002569 index = self.queue.find(b"</event>")
2570 index = self.queue.find(b"</exitcode>")
2571 while index != -1 and self.queue.startswith(b"<exitcode>"):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002572 try:
2573 task, status = pickle.loads(self.queue[10:index])
2574 except ValueError as e:
2575 bb.msg.fatal("RunQueue", "failed to load pickle '%s': '%s'" % (e, self.queue[10:index]))
2576 self.rqexec.runqueue_process_waitpid(task, status)
2577 found = True
2578 self.queue = self.queue[index+11:]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002579 index = self.queue.find(b"</exitcode>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002580 return (end > start)
2581
2582 def close(self):
2583 while self.read():
2584 continue
2585 if len(self.queue) > 0:
2586 print("Warning, worker left partial message: %s" % self.queue)
2587 self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002588
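# Helpers for BB_SETSCENE_ENFORCE: when it is set to '1', a failing setscene
# task is only tolerated if "pn:taskname" matches an entry in
# BB_SETSCENE_ENFORCE_WHITELIST (fnmatch patterns); entries of the form
# "%:taskname" are expanded to one entry per target named on the command line.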
2589def get_setscene_enforce_whitelist(d):
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002590 if d.getVar('BB_SETSCENE_ENFORCE') != '1':
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002591 return None
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002592 whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002593 outlist = []
2594 for item in whitelist[:]:
2595 if item.startswith('%:'):
2596 for target in sys.argv[1:]:
2597 if not target.startswith('-'):
2598 outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
2599 else:
2600 outlist.append(item)
2601 return outlist
2602
2603def check_setscene_enforce_whitelist(pn, taskname, whitelist):
2604 import fnmatch
2605 if whitelist:
2606 item = '%s:%s' % (pn, taskname)
2607 for whitelist_item in whitelist:
2608 if fnmatch.fnmatch(item, whitelist_item):
2609 return True
2610 return False
2611 return True
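
# Illustrative example (variable values are hypothetical, not taken from any
# real configuration): with BB_SETSCENE_ENFORCE = "1",
# BB_SETSCENE_ENFORCE_WHITELIST = "pseudo-native:do_install %:do_patch" and
# "core-image-minimal" given on the command line, get_setscene_enforce_whitelist(d)
# returns ['pseudo-native:do_install', 'core-image-minimal:do_patch'], so
# check_setscene_enforce_whitelist('pseudo-native', 'do_install', whitelist)
# is True while check_setscene_enforce_whitelist('glibc', 'do_compile', whitelist)
# is False, and a setscene failure for the latter is reported as an error.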