blob: 383c18323506badb6912fd4620b3d1a10b17b035 [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001#!/usr/bin/env python
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4"""
5BitBake 'RunQueue' implementation
6
7Handles preparation and execution of a queue of tasks
8"""
9
10# Copyright (C) 2006-2007 Richard Purdie
11#
12# This program is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 2 as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License along
22# with this program; if not, write to the Free Software Foundation, Inc.,
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25import copy
26import os
27import sys
28import signal
29import stat
30import fcntl
31import errno
32import logging
33import re
34import bb
35from bb import msg, data, event
36from bb import monitordisk
37import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060038import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050039from multiprocessing import Process
Patrick Williamsc124f4f2015-09-15 14:41:29 -050040
# Module loggers: "BitBake" is the root bitbake logger; "BitBake.RunQueue"
# is the child logger scoped to this module.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 32-character hex string (an md5-style checksum),
# case-insensitively; the lookaround assertions reject matches embedded
# inside a longer alphanumeric run.
__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
45
def fn_from_tid(tid):
    """Return the filename part of a task id (everything before the last colon)."""
    fields = tid.rsplit(":", 1)
    return fields[0]
48
def taskname_from_tid(tid):
    """Return the task name part of a task id (everything after the last colon)."""
    fields = tid.rsplit(":", 1)
    return fields[1]
51
def mc_from_tid(tid):
    """Return the multiconfig name embedded in a task id, or "" for the default config."""
    if not tid.startswith('multiconfig:'):
        return ""
    return tid.split(':')[1]
56
def split_tid(tid):
    """Return (mc, fn, taskname) for a task id, dropping the mcfn element."""
    return split_tid_mcfn(tid)[:3]
60
def split_tid_mcfn(tid):
    """
    Decompose a task id into (mc, fn, taskname, mcfn).

    Multiconfig ids look like "multiconfig:<mc>:<fn>:<taskname>"; plain
    ids are "<fn>:<taskname>", for which mc is "" and mcfn == fn.
    """
    if tid.startswith('multiconfig:'):
        _, mc, rest = tid.split(':', 2)
        fn, taskname = rest.rsplit(':', 1)
        mcfn = "multiconfig:" + mc + ":" + fn
    else:
        fn, taskname = tid.rsplit(':', 1)
        mc = ""
        mcfn = fn

    return (mc, fn, taskname, mcfn)
76
def build_tid(mc, fn, taskname):
    """Assemble a task id from its multiconfig, filename and task name parts."""
    if mc:
        return ":".join(("multiconfig", mc, fn, taskname))
    return fn + ":" + taskname
81
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for each terminal state plus the number currently running
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return an independent snapshot of the current counters."""
        clone = self.__class__(self.total)
        clone.__dict__.update(self.__dict__)
        return clone

    def taskFailed(self):
        """Record one active task finishing in failure."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        """Record one active task finishing successfully."""
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        """Record a skipped task (counted as briefly active)."""
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        """Record a task starting execution."""
        self.active += 1
112
# These values indicate the next step due to be run in the
# runQueue state machine. They form an ordered progression from
# preparation through (setscene and real) execution to completion,
# with runQueueFailed/runQueueCleanUp covering the failure path.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
123
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.

    The default ("basic") scheduler returns the first buildable task, with
    the priority map ordered by task number. Subclasses replace prio_map
    to implement other orderings.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Bugfix: materialise the keys into a real list. The previous code
        # ([self.rqdata.runtaskentries.keys()]) produced a one-element list
        # containing a dict_keys view, which breaks prio_map.index() in
        # next_buildable_task() and enumerate(self.prio_map) in dump_prio().
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        # Cache of taskname -> "number_threads" varflag value (None/"" when unset)
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Lazily-built map of tid -> priority (its index in prio_map)
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop candidates that started running since we last looked
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        if len(self.buildable) == 1:
            # Fast path: a single candidate, no need to consult priorities
            tid = self.buildable[0]
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if self.rev_prio_map is None:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the candidate with the best (lowest) priority whose stamp is
        # not already associated with a running task (rq.build_stamps)
        best = None
        bestprio = None
        for tid in self.buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Record that a task has become buildable."""
        self.buildable.append(task)

    def describe_task(self, taskid):
        """Return a human-readable description of a task for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Dump the current priority map to the debug log."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
224
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket the task ids by their computed weight...
        by_weight = {}
        for tid in self.rqdata.runtaskentries:
            by_weight.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # ...emit the buckets in ascending weight order, then flip the
        # whole list so the heaviest tasks come first.
        self.prio_map = []
        for w in sorted(by_weight):
            self.prio_map.extend(by_weight[w])

        self.prio_map.reverse()
251
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        """Build on the speed ordering, then regroup tasks per recipe."""
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    # Move each matching task up to the front section of the
                    # priority map, preserving relative recipe ordering.
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500348
class RunTaskEntry(object):
    """Per-task record: dependency sets, signature hash, task id and weight."""

    def __init__(self):
        # Forward and reverse dependency sets (tids)
        self.depends = set()
        self.revdeps = set()
        # Signature hash and task reference, filled in later
        self.hash = None
        self.task = None
        # Scheduling weight; every task starts at 1
        self.weight = 1
356
class RunQueueData:
    """
    BitBake Run Queue implementation

    Holds the data needed to build and order the run queue: per-task
    dependency entries (runtaskentries) plus configuration read from
    the datastore.
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        # Flag consulted/updated elsewhere; starts cleared
        self.warn_multi_bb = False

        # Configuration knobs read once from the datastore
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # Dummy reporter by default; presumably replaced by callers that
        # want progress output — confirm against cooker usage
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
377
378 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600379 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500380
381 def runq_depends_names(self, ids):
382 import re
383 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600384 for id in ids:
385 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500386 nam = re.sub("_[^,]*,", ",", nam)
387 ret.extend([nam])
388 return ret
389
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600390 def get_task_hash(self, tid):
391 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500392
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600393 def get_user_idstring(self, tid, task_name_suffix = ""):
394 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500395
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500396 def get_short_user_idstring(self, task, task_name_suffix = ""):
Brad Bishop37a0e4d2017-12-04 01:01:44 -0500397 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
398 pn = self.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600399 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500400 return "%s:%s" % (pn, taskname)
401
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500402 def circular_depchains_handler(self, tasks):
403 """
404 Some tasks aren't buildable, likely due to circular dependency issues.
405 Identify the circular dependencies and print them in a user readable format.
406 """
407 from copy import deepcopy
408
409 valid_chains = []
410 explored_deps = {}
411 msgs = []
412
Andrew Geissler99467da2019-02-25 18:54:23 -0600413 class TooManyLoops(Exception):
414 pass
415
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500416 def chain_reorder(chain):
417 """
418 Reorder a dependency chain so the lowest task id is first
419 """
420 lowest = 0
421 new_chain = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600422 for entry in range(len(chain)):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500423 if chain[entry] < chain[lowest]:
424 lowest = entry
425 new_chain.extend(chain[lowest:])
426 new_chain.extend(chain[:lowest])
427 return new_chain
428
429 def chain_compare_equal(chain1, chain2):
430 """
431 Compare two dependency chains and see if they're the same
432 """
433 if len(chain1) != len(chain2):
434 return False
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600435 for index in range(len(chain1)):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500436 if chain1[index] != chain2[index]:
437 return False
438 return True
439
440 def chain_array_contains(chain, chain_array):
441 """
442 Return True if chain_array contains chain
443 """
444 for ch in chain_array:
445 if chain_compare_equal(ch, chain):
446 return True
447 return False
448
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600449 def find_chains(tid, prev_chain):
450 prev_chain.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500451 total_deps = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600452 total_deps.extend(self.runtaskentries[tid].revdeps)
453 for revdep in self.runtaskentries[tid].revdeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500454 if revdep in prev_chain:
455 idx = prev_chain.index(revdep)
456 # To prevent duplicates, reorder the chain to start with the lowest taskid
457 # and search through an array of those we've already printed
458 chain = prev_chain[idx:]
459 new_chain = chain_reorder(chain)
460 if not chain_array_contains(new_chain, valid_chains):
461 valid_chains.append(new_chain)
462 msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
463 for dep in new_chain:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600464 msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500465 msgs.append("\n")
466 if len(valid_chains) > 10:
467 msgs.append("Aborted dependency loops search after 10 matches.\n")
Andrew Geissler99467da2019-02-25 18:54:23 -0600468 raise TooManyLoops
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500469 continue
470 scan = False
471 if revdep not in explored_deps:
472 scan = True
473 elif revdep in explored_deps[revdep]:
474 scan = True
475 else:
476 for dep in prev_chain:
477 if dep in explored_deps[revdep]:
478 scan = True
479 if scan:
480 find_chains(revdep, copy.deepcopy(prev_chain))
481 for dep in explored_deps[revdep]:
482 if dep not in total_deps:
483 total_deps.append(dep)
484
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600485 explored_deps[tid] = total_deps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500486
Andrew Geissler99467da2019-02-25 18:54:23 -0600487 try:
488 for task in tasks:
489 find_chains(task, [])
490 except TooManyLoops:
491 pass
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500492
493 return msgs
494
495 def calculate_task_weights(self, endpoints):
496 """
497 Calculate a number representing the "weight" of each task. Heavier weighted tasks
498 have more dependencies and hence should be executed sooner for maximum speed.
499
500 This function also sanity checks the task list finding tasks that are not
501 possible to execute due to circular dependencies.
502 """
503
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600504 numTasks = len(self.runtaskentries)
505 weight = {}
506 deps_left = {}
507 task_done = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500508
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600509 for tid in self.runtaskentries:
510 task_done[tid] = False
511 weight[tid] = 1
512 deps_left[tid] = len(self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500513
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600514 for tid in endpoints:
515 weight[tid] = 10
516 task_done[tid] = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500517
518 while True:
519 next_points = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600520 for tid in endpoints:
521 for revdep in self.runtaskentries[tid].depends:
522 weight[revdep] = weight[revdep] + weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500523 deps_left[revdep] = deps_left[revdep] - 1
524 if deps_left[revdep] == 0:
525 next_points.append(revdep)
526 task_done[revdep] = True
527 endpoints = next_points
528 if len(next_points) == 0:
529 break
530
531 # Circular dependency sanity check
532 problem_tasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600533 for tid in self.runtaskentries:
534 if task_done[tid] is False or deps_left[tid] != 0:
535 problem_tasks.append(tid)
536 logger.debug(2, "Task %s is not buildable", tid)
537 logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
538 self.runtaskentries[tid].weight = weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500539
540 if problem_tasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600541 message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500542 message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
543 message = message + "Identifying dependency loops (this may take a short while)...\n"
544 logger.error(message)
545
546 msgs = self.circular_depchains_handler(problem_tasks)
547
548 message = "\n"
549 for msg in msgs:
550 message = message + msg
551 bb.msg.fatal("RunQueue", message)
552
553 return weight
554
555 def prepare(self):
556 """
557 Turn a set of taskData into a RunQueue and compute data needed
558 to optimise the execution order.
559 """
560
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600561 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500562 recursivetasks = {}
563 recursiveitasks = {}
564 recursivetasksselfref = set()
565
566 taskData = self.taskData
567
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 found = False
569 for mc in self.taskData:
570 if len(taskData[mc].taskentries) > 0:
571 found = True
572 break
573 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500574 # Nothing to do
575 return 0
576
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600577 self.init_progress_reporter.start()
578 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500579
580 # Step A - Work out a list of tasks to run
581 #
582 # Taskdata gives us a list of possible providers for every build and run
583 # target ordered by priority. It also gives information on each of those
584 # providers.
585 #
586 # To create the actual list of tasks to execute we fix the list of
587 # providers and then resolve the dependencies into task IDs. This
588 # process is repeated for each type of dependency (tdepends, deptask,
589 # rdeptast, recrdeptask, idepends).
590
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600591 def add_build_dependencies(depids, tasknames, depends, mc):
592 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500593 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600594 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500595 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500597 if depdata is None:
598 continue
599 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600600 t = depdata + ":" + taskname
601 if t in taskData[mc].taskentries:
602 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500603
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600604 def add_runtime_dependencies(depids, tasknames, depends, mc):
605 for depname in depids:
606 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500607 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600608 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500609 if depdata is None:
610 continue
611 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600612 t = depdata + ":" + taskname
613 if t in taskData[mc].taskentries:
614 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500615
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800616 def add_mc_dependencies(mc, tid):
617 mcdeps = taskData[mc].get_mcdepends()
618 for dep in mcdeps:
619 mcdependency = dep.split(':')
620 pn = mcdependency[3]
621 frommc = mcdependency[1]
622 mcdep = mcdependency[2]
623 deptask = mcdependency[4]
624 if mc == frommc:
625 fn = taskData[mcdep].build_targets[pn][0]
626 newdep = '%s:%s' % (fn,deptask)
627 taskData[mc].taskentries[tid].tdepends.append(newdep)
628
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600629 for mc in taskData:
630 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500631
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600632 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
633 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500634
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600635 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
636
637 depends = set()
638 task_deps = self.dataCaches[mc].task_deps[taskfn]
639
640 self.runtaskentries[tid] = RunTaskEntry()
641
642 if fn in taskData[mc].failed_fns:
643 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500644
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800645 # We add multiconfig dependencies before processing internal task deps (tdepends)
646 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
647 add_mc_dependencies(mc, tid)
648
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649 # Resolve task internal dependencies
650 #
651 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600652 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800653 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
654 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500655
656 # Resolve 'deptask' dependencies
657 #
658 # e.g. do_sometask[deptask] = "do_someothertask"
659 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600660 if 'deptask' in task_deps and taskname in task_deps['deptask']:
661 tasknames = task_deps['deptask'][taskname].split()
662 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500663
664 # Resolve 'rdeptask' dependencies
665 #
666 # e.g. do_sometask[rdeptask] = "do_someothertask"
667 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600668 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
669 tasknames = task_deps['rdeptask'][taskname].split()
670 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500671
672 # Resolve inter-task dependencies
673 #
674 # e.g. do_sometask[depends] = "targetname:do_someothertask"
675 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600676 idepends = taskData[mc].taskentries[tid].idepends
677 for (depname, idependtask) in idepends:
678 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500679 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600680 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500681 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600682 t = depdata + ":" + idependtask
683 depends.add(t)
684 if t not in taskData[mc].taskentries:
685 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
686 irdepends = taskData[mc].taskentries[tid].irdepends
687 for (depname, idependtask) in irdepends:
688 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500689 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500690 if not taskData[mc].run_targets[depname]:
691 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600692 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500693 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600694 t = depdata + ":" + idependtask
695 depends.add(t)
696 if t not in taskData[mc].taskentries:
697 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698
699 # Resolve recursive 'recrdeptask' dependencies (Part A)
700 #
701 # e.g. do_sometask[recrdeptask] = "do_someothertask"
702 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
703 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600704 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
705 tasknames = task_deps['recrdeptask'][taskname].split()
706 recursivetasks[tid] = tasknames
707 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
708 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
709 if taskname in tasknames:
710 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500711
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600712 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
713 recursiveitasks[tid] = []
714 for t in task_deps['recideptask'][taskname].split():
715 newdep = build_tid(mc, fn, t)
716 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500717
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600718 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400719 # Remove all self references
720 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500721
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600722 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500723
Brad Bishop316dfdd2018-06-25 12:45:53 -0400724 self.init_progress_reporter.next_stage()
725
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500726 # Resolve recursive 'recrdeptask' dependencies (Part B)
727 #
728 # e.g. do_sometask[recrdeptask] = "do_someothertask"
729 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600730 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600731
Brad Bishop316dfdd2018-06-25 12:45:53 -0400732 # Generating/interating recursive lists of dependencies is painful and potentially slow
733 # Precompute recursive task dependencies here by:
734 # a) create a temp list of reverse dependencies (revdeps)
735 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
736 # c) combine the total list of dependencies in cumulativedeps
737 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500738
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500739
Brad Bishop316dfdd2018-06-25 12:45:53 -0400740 revdeps = {}
741 deps = {}
742 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600743 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400744 deps[tid] = set(self.runtaskentries[tid].depends)
745 revdeps[tid] = set()
746 cumulativedeps[tid] = set()
747 # Generate a temp list of reverse dependencies
748 for tid in self.runtaskentries:
749 for dep in self.runtaskentries[tid].depends:
750 revdeps[dep].add(tid)
751 # Find the dependency chain endpoints
752 endpoints = set()
753 for tid in self.runtaskentries:
754 if len(deps[tid]) == 0:
755 endpoints.add(tid)
756 # Iterate the chains collating dependencies
757 while endpoints:
758 next = set()
759 for tid in endpoints:
760 for dep in revdeps[tid]:
761 cumulativedeps[dep].add(fn_from_tid(tid))
762 cumulativedeps[dep].update(cumulativedeps[tid])
763 if tid in deps[dep]:
764 deps[dep].remove(tid)
765 if len(deps[dep]) == 0:
766 next.add(dep)
767 endpoints = next
768 #for tid in deps:
769 # if len(deps[tid]) != 0:
770 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
771
772 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
773 # resolve these recursively until we aren't adding any further extra dependencies
774 extradeps = True
775 while extradeps:
776 extradeps = 0
777 for tid in recursivetasks:
778 tasknames = recursivetasks[tid]
779
780 totaldeps = set(self.runtaskentries[tid].depends)
781 if tid in recursiveitasks:
782 totaldeps.update(recursiveitasks[tid])
783 for dep in recursiveitasks[tid]:
784 if dep not in self.runtaskentries:
785 continue
786 totaldeps.update(self.runtaskentries[dep].depends)
787
788 deps = set()
789 for dep in totaldeps:
790 if dep in cumulativedeps:
791 deps.update(cumulativedeps[dep])
792
793 for t in deps:
794 for taskname in tasknames:
795 newtid = t + ":" + taskname
796 if newtid == tid:
797 continue
798 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
799 extradeps += 1
800 self.runtaskentries[tid].depends.add(newtid)
801
802 # Handle recursive tasks which depend upon other recursive tasks
803 deps = set()
804 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
805 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
806 for newtid in deps:
807 for taskname in tasknames:
808 if not newtid.endswith(":" + taskname):
809 continue
810 if newtid in self.runtaskentries:
811 extradeps += 1
812 self.runtaskentries[tid].depends.add(newtid)
813
814 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
815
816 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
817 for tid in recursivetasksselfref:
818 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600819
820 self.init_progress_reporter.next_stage()
821
822 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500823
824 # Step B - Mark all active tasks
825 #
826 # Start with the tasks we were asked to run and mark all dependencies
827 # as active too. If the task is to be 'forced', clear its stamp. Once
828 # all active tasks are marked, prune the ones we don't need.
829
830 logger.verbose("Marking Active Tasks")
831
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600832 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500833 """
834 Mark an item as active along with its depends
835 (calls itself recursively)
836 """
837
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600838 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500839 return
840
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600841 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500842
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600843 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500844 for depend in depends:
845 mark_active(depend, depth+1)
846
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600847 self.target_tids = []
848 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600850 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500851 continue
852
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600853 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500854 continue
855
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500856 parents = False
857 if task.endswith('-'):
858 parents = True
859 task = task[:-1]
860
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600861 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500862 continue
863
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600864 # fn already has mc prefix
865 tid = fn + ":" + task
866 self.target_tids.append(tid)
867 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500868 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600869 tasks = []
870 for x in taskData[mc].taskentries:
871 if x.startswith(fn + ":"):
872 tasks.append(taskname_from_tid(x))
873 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500874 if close_matches:
875 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
876 else:
877 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600878 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
879
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500880 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500881 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600882 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500883 mark_active(i, 1)
884 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600885 mark_active(tid, 1)
886
887 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500888
889 # Step C - Prune all inactive tasks
890 #
891 # Once all active tasks are marked, prune the ones we don't need.
892
Brad Bishop316dfdd2018-06-25 12:45:53 -0400893 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600894 for tid in list(self.runtaskentries.keys()):
895 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400896 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600897 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600898
Brad Bishop316dfdd2018-06-25 12:45:53 -0400899 # Handle --runall
900 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500901 # re-run the mark_active and then drop unused tasks from new list
902 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400903
904 for task in self.cooker.configuration.runall:
905 runall_tids = set()
906 for tid in list(self.runtaskentries):
907 wanttid = fn_from_tid(tid) + ":do_%s" % task
908 if wanttid in delcount:
909 self.runtaskentries[wanttid] = delcount[wanttid]
910 if wanttid in self.runtaskentries:
911 runall_tids.add(wanttid)
912
913 for tid in list(runall_tids):
914 mark_active(tid,1)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500915
916 for tid in list(self.runtaskentries.keys()):
917 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400918 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500919 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500920
921 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400922 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
923
924 self.init_progress_reporter.next_stage()
925
926 # Handle runonly
927 if self.cooker.configuration.runonly:
928 # re-run the mark_active and then drop unused tasks from new list
929 runq_build = {}
930
931 for task in self.cooker.configuration.runonly:
932 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
933
934 for tid in list(runonly_tids):
935 mark_active(tid,1)
936
937 for tid in list(self.runtaskentries.keys()):
938 if tid not in runq_build:
939 delcount[tid] = self.runtaskentries[tid]
940 del self.runtaskentries[tid]
941
942 if len(self.runtaskentries) == 0:
943 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500944
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500945 #
946 # Step D - Sanity checks and computation
947 #
948
949 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600950 if len(self.runtaskentries) == 0:
951 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500952 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
953 else:
954 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
955
Brad Bishop316dfdd2018-06-25 12:45:53 -0400956 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500957
958 logger.verbose("Assign Weightings")
959
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600960 self.init_progress_reporter.next_stage()
961
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500962 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600963 for tid in self.runtaskentries:
964 for dep in self.runtaskentries[tid].depends:
965 self.runtaskentries[dep].revdeps.add(tid)
966
967 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500968
969 # Identify tasks at the end of dependency chains
970 # Error on circular dependency loops (length two)
971 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600972 for tid in self.runtaskentries:
973 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500974 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600975 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500976 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600977 if dep in self.runtaskentries[tid].depends:
978 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
979
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500980
981 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
982
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600983 self.init_progress_reporter.next_stage()
984
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985 # Calculate task weights
986 # Check of higher length circular dependencies
987 self.runq_weight = self.calculate_task_weights(endpoints)
988
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600989 self.init_progress_reporter.next_stage()
990
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500991 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600992 for mc in self.dataCaches:
993 prov_list = {}
994 seen_fn = []
995 for tid in self.runtaskentries:
996 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
997 if taskfn in seen_fn:
998 continue
999 if mc != tidmc:
1000 continue
1001 seen_fn.append(taskfn)
1002 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1003 if prov not in prov_list:
1004 prov_list[prov] = [taskfn]
1005 elif taskfn not in prov_list[prov]:
1006 prov_list[prov].append(taskfn)
1007 for prov in prov_list:
1008 if len(prov_list[prov]) < 2:
1009 continue
1010 if prov in self.multi_provider_whitelist:
1011 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001012 seen_pn = []
1013 # If two versions of the same PN are being built its fatal, we don't support it.
1014 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001015 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001016 if pn not in seen_pn:
1017 seen_pn.append(pn)
1018 else:
1019 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001020 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1021 #
1022 # Construct a list of things which uniquely depend on each provider
1023 # since this may help the user figure out which dependency is triggering this warning
1024 #
1025 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1026 deplist = {}
1027 commondeps = None
1028 for provfn in prov_list[prov]:
1029 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001030 for tid in self.runtaskentries:
1031 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001032 if fn != provfn:
1033 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001034 for dep in self.runtaskentries[tid].revdeps:
1035 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001036 if fn == provfn:
1037 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001038 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001039 if not commondeps:
1040 commondeps = set(deps)
1041 else:
1042 commondeps &= deps
1043 deplist[provfn] = deps
1044 for provfn in deplist:
1045 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1046 #
1047 # Construct a list of provides and runtime providers for each recipe
1048 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1049 #
1050 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1051 provide_results = {}
1052 rprovide_results = {}
1053 commonprovs = None
1054 commonrprovs = None
1055 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001056 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001057 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001058 for rprovide in self.dataCaches[mc].rproviders:
1059 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001060 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 for package in self.dataCaches[mc].packages:
1062 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001063 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001064 for package in self.dataCaches[mc].packages_dynamic:
1065 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001066 rprovides.add(package)
1067 if not commonprovs:
1068 commonprovs = set(provides)
1069 else:
1070 commonprovs &= provides
1071 provide_results[provfn] = provides
1072 if not commonrprovs:
1073 commonrprovs = set(rprovides)
1074 else:
1075 commonrprovs &= rprovides
1076 rprovide_results[provfn] = rprovides
1077 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1078 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1079 for provfn in prov_list[prov]:
1080 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1081 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1082
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001083 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001084 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001085 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001086 logger.error(msg)
1087
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001088 self.init_progress_reporter.next_stage()
1089
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001090 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001091 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001092 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001093 self.stampfnwhitelist[mc] = []
1094 for entry in self.stampwhitelist.split():
1095 if entry not in self.taskData[mc].build_targets:
1096 continue
1097 fn = self.taskData.build_targets[entry][0]
1098 self.stampfnwhitelist[mc].append(fn)
1099
1100 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001101
1102 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001103 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001104 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001105 for tid in self.runtaskentries:
1106 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001107 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001108 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001110 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001111
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001112 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001113 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1114 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001115 if fn + ":" + taskname not in taskData[mc].taskentries:
1116 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001117 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1118 if error_nostamp:
1119 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1120 else:
1121 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1122 else:
1123 logger.verbose("Invalidate task %s, %s", taskname, fn)
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001124 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001125
1126 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001127
1128 # Invalidate task if force mode active
1129 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001130 for tid in self.target_tids:
1131 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132
1133 # Invalidate task if invalidate mode active
1134 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 for tid in self.target_tids:
1136 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137 for st in self.cooker.configuration.invalidate_stamp.split(','):
1138 if not st.startswith("do_"):
1139 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001140 invalidate_task(fn + ":" + st, True)
1141
1142 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001143
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001144 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 for mc in taskData:
1146 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1147 virtpnmap = {}
1148 for v in virtmap:
1149 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1150 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1151 if hasattr(bb.parse.siggen, "tasks_resolved"):
1152 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1153
1154 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001155
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001156 # Iterate over the task list and call into the siggen code
1157 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001158 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001159 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001160 for tid in todeal.copy():
1161 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1162 dealtwith.add(tid)
1163 todeal.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001164 procdep = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001165 for dep in self.runtaskentries[tid].depends:
1166 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
1167 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1168 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
1169 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001170
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001171 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001172
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001173 #self.dump_data()
1174 return len(self.runtaskentries)
1175
1176 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001177 """
1178 Dump some debug information on the internal data structures
1179 """
1180 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001181 for tid in self.runtaskentries:
1182 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1183 self.runtaskentries[tid].weight,
1184 self.runtaskentries[tid].depends,
1185 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001186
class RunQueueWorker():
    """
    Container pairing a running bitbake-worker process with the pipe
    used to read events back from it.
    """
    def __init__(self, process, pipe):
        # The handle for the worker process (a subprocess.Popen in practice).
        self.process = process
        # The pipe wrapping the worker's stdout (a runQueuePipe in practice).
        self.pipe = pipe

    def __repr__(self):
        # Aid debugging: identify which process/pipe pair this wraps.
        return "%s(%r, %r)" % (type(self).__name__, self.process, self.pipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001191
1192class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001193 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001194
1195 self.cooker = cooker
1196 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001197 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001198
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001199 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1200 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
1201 self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
1202 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001203
1204 self.state = runQueuePrepare
1205
1206 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001207 # Invoked at regular time intervals via the bitbake heartbeat event
1208 # while the build is running. We generate a unique name for the handler
1209 # here, just in case that there ever is more than one RunQueue instance,
1210 # start the handler when reaching runQueueSceneRun, and stop it when
1211 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001212 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001213 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1214 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001215 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001216 self.worker = {}
1217 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218
    def _start_worker(self, mc, fakeroot = False, rqexec = None):
        """
        Spawn a bitbake-worker subprocess for multiconfig 'mc' and feed it
        its initial pickled configuration over stdin.

        If 'fakeroot' is set the worker is launched through FAKEROOTCMD with
        the FAKEROOTBASEENV environment applied. Returns a RunQueueWorker
        pairing the process with a runQueuePipe reading its stdout.
        """
        logger.debug(1, "Starting bitbake-worker")
        # The "magic" token tells the worker which mode it runs in:
        # profiling appends "bad", fakeroot appends "beef".
        magic = "decafbad"
        if self.cooker.configuration.profile:
            magic = "decafbadbad"
        if fakeroot:
            magic = magic + "beef"
            mcdata = self.cooker.databuilder.mcdata[mc]
            fakerootcmd = mcdata.getVar("FAKEROOTCMD")
            fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
            # FAKEROOTBASEENV is a list of KEY=VALUE assignments layered on
            # top of a copy of the current environment.
            env = os.environ.copy()
            for key, value in (var.split('=') for var in fakerootenv):
                env[key] = value
            worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
        else:
            worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        # Events from the worker are read asynchronously, so its stdout
        # must not block.
        bb.utils.nonblockingfd(worker.stdout)
        workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)

        # Map of task id -> task hash for every task in the run queue.
        runqhash = {}
        for tid in self.rqdata.runtaskentries:
            runqhash[tid] = self.rqdata.runtaskentries[tid].hash

        # Everything the worker needs to execute tasks on its own:
        # task/fakeroot metadata, signature data, hashes and logging setup.
        workerdata = {
            "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
            "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
            "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
            "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
            "sigdata" : bb.parse.siggen.get_taskdata(),
            "runq_hash" : runqhash,
            "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
            "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
            "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
            "logdefaultdomain" : bb.msg.loggerDefaultDomains,
            "prhost" : self.cooker.prhost,
            "buildname" : self.cfgData.getVar("BUILDNAME"),
            "date" : self.cfgData.getVar("DATE"),
            "time" : self.cfgData.getVar("TIME"),
        }

        # Handshake protocol: each pickled payload is framed by XML-style
        # byte tags; the worker parses these off its stdin. Order matters.
        worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
        worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
        worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
        worker.stdin.flush()

        return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001265
    def _teardown_worker(self, worker):
        """
        Shut down one RunQueueWorker: ask it to quit, then drain its event
        pipe until the process has exited.
        """
        if not worker:
            return
        logger.debug(1, "Teardown for bitbake-worker")
        try:
            # Politely ask the worker to exit via the framed stdin protocol.
            worker.process.stdin.write(b"<quit></quit>")
            worker.process.stdin.flush()
            worker.process.stdin.close()
        except IOError:
            # The worker may already have died and closed its stdin;
            # that is fine, we only care that it ends up gone.
            pass
        # Keep servicing the worker's output pipe while waiting for it to
        # exit, otherwise a worker blocked writing events could never quit.
        while worker.process.returncode is None:
            worker.pipe.read()
            worker.process.poll()
        # Drain any remaining buffered events after exit.
        while worker.pipe.read():
            continue
        worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001282
1283 def start_worker(self):
1284 if self.worker:
1285 self.teardown_workers()
1286 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001287 for mc in self.rqdata.dataCaches:
1288 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001289
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001290 def start_fakeworker(self, rqexec, mc):
1291 if not mc in self.fakeworker:
1292 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001293
1294 def teardown_workers(self):
1295 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001296 for mc in self.worker:
1297 self._teardown_worker(self.worker[mc])
1298 self.worker = {}
1299 for mc in self.fakeworker:
1300 self._teardown_worker(self.fakeworker[mc])
1301 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001302
1303 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001304 for mc in self.worker:
1305 self.worker[mc].pipe.read()
1306 for mc in self.fakeworker:
1307 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001308
1309 def active_fds(self):
1310 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001311 for mc in self.worker:
1312 fds.append(self.worker[mc].pipe.input)
1313 for mc in self.fakeworker:
1314 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001315 return fds
1316
    def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
        """Return True if the stamp for *tid* is current, i.e. the task does
        not need to rerun according to the configured stamp policy.

        taskname: override the task name parsed out of tid.
        recurse:  also validate the stamps of all (transitive) dependencies,
                  memoising per-tid results in *cache* (dict tid -> bool).
        """
        # Helper: mtime of f, or None if the file is missing/unstattable.
        def get_timestamp(f):
            try:
                if not os.access(f, os.F_OK):
                    return None
                return os.stat(f)[stat.ST_MTIME]
            except:
                return None

        (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
        if taskname is None:
            taskname = tn

        # "perfile" only compares stamps within one recipe; any other policy
        # considers the full dependency tree ("whitelist" exempts listed files).
        if self.stamppolicy == "perfile":
            fulldeptree = False
        else:
            fulldeptree = True
            stampwhitelist = []
            if self.stamppolicy == "whitelist":
                stampwhitelist = self.rqdata.stampfnwhitelist[mc]

        stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)

        # If the stamp is missing, it's not current
        if not os.access(stampfile, os.F_OK):
            logger.debug(2, "Stampfile %s not available", stampfile)
            return False
        # If it's a 'nostamp' task, it's not current
        taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
        if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
            logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
            return False

        # Setscene variants are considered current once their stamp exists;
        # no dependency timestamp comparison is performed for them.
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            return True

        if cache is None:
            cache = {}

        # Compare our stamp's mtime against each dependency's stamp; any newer
        # or missing dependency stamp invalidates ours.
        iscurrent = True
        t1 = get_timestamp(stampfile)
        for dep in self.rqdata.runtaskentries[tid].depends:
            if iscurrent:
                (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
                stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
                stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
                t2 = get_timestamp(stampfile2)
                t3 = get_timestamp(stampfile3)
                # A setscene stamp that is present (and newer than any normal
                # stamp) satisfies this dependency.
                if t3 and not t2:
                    continue
                if t3 and t3 > t2:
                    continue
                if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
                    if not t2:
                        logger.debug(2, 'Stampfile %s does not exist', stampfile2)
                        iscurrent = False
                        break
                    if t1 < t2:
                        logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
                        iscurrent = False
                        break
                if recurse and iscurrent:
                    if dep in cache:
                        iscurrent = cache[dep]
                        if not iscurrent:
                            logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
                    else:
                        iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
                        cache[dep] = iscurrent
        if recurse:
            cache[tid] = iscurrent
        return iscurrent
1389
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is a state machine driven repeatedly by the caller; each call
        advances self.state and returns False when everything is done,
        or a retval/fd list to indicate the caller should call again.
        """

        retval = True

        # Stage: prepare the task graph and set up progress reporting.
        if self.state is runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                self.rqdata.init_progress_reporter.next_stage()

                # we are ready to run,  emit dependency info to any UI or class which
                # needs it
                depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
                self.rqdata.init_progress_reporter.next_stage()
                bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

        # Stage: either dump signatures and stop, or start workers and the
        # setscene (sstate reuse) executor.
        if self.state is runQueueSceneInit:
            # Hook the disk monitor onto heartbeat events (once only).
            if not self.dm_event_handler_registered:
                res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete
            else:
                self.rqdata.init_progress_reporter.next_stage()
                self.start_worker()
                self.rqdata.init_progress_reporter.next_stage()
                self.rqexe = RunQueueExecuteScenequeue(self)

        # Stage: drive the setscene executor.
        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        # Stage: hand over to the real task executor (unless --setscene-only).
        if self.state is runQueueRunInit:
            if self.cooker.configuration.setsceneonly:
                self.state = runQueueComplete
            else:
                # Just in case we didn't setscene
                self.rqdata.init_progress_reporter.finish()
                logger.info("Executing RunQueue Tasks")
                self.rqexe = RunQueueExecuteTasks(self)
                self.state = runQueueRunning

        # Stage: drive the task executor.
        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        # Stage: let in-flight tasks drain after a failure/stop request.
        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1486
1487 def execute_runqueue(self):
1488 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1489 try:
1490 return self._execute_runqueue()
1491 except bb.runqueue.TaskFailure:
1492 raise
1493 except SystemExit:
1494 raise
1495 except bb.BBHandledException:
1496 try:
1497 self.teardown_workers()
1498 except:
1499 pass
1500 self.state = runQueueComplete
1501 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001502 except Exception as err:
1503 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001504 try:
1505 self.teardown_workers()
1506 except:
1507 pass
1508 self.state = runQueueComplete
1509 raise
1510
1511 def finish_runqueue(self, now = False):
1512 if not self.rqexe:
1513 self.state = runQueueComplete
1514 return
1515
1516 if now:
1517 self.rqexe.finish_now()
1518 else:
1519 self.rqexe.finish()
1520
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001521 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001522 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001523 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1524 siggen = bb.parse.siggen
1525 dataCaches = self.rqdata.dataCaches
1526 siggen.dump_sigfn(fn, dataCaches, options)
1527
1528 def dump_signatures(self, options):
1529 fns = set()
1530 bb.note("Reparsing files to collect dependency data")
1531
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001532 for tid in self.rqdata.runtaskentries:
1533 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001534 fns.add(fn)
1535
1536 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1537 # We cannot use the real multiprocessing.Pool easily due to some local data
1538 # that can't be pickled. This is a cheap multi-process solution.
1539 launched = []
1540 while fns:
1541 if len(launched) < max_process:
1542 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1543 p.start()
1544 launched.append(p)
1545 for q in launched:
1546 # The finished processes are joined when calling is_alive()
1547 if not q.is_alive():
1548 launched.remove(q)
1549 for p in launched:
1550 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001551
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001552 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001553
1554 return
1555
    def print_diffscenetasks(self):
        """Report which tasks prevent reuse of cached (setscene) results.

        Asks the hash validation function which task hashes are valid, then
        walks the dependency graph to find the "root" invalid tasks — those
        with no invalid dependency of their own — and prints them as the
        points where the current build first diverges from any cache.

        Returns the set of root invalid tids.
        """

        valid = []
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []
        valid_new = set()

        # Build the parallel lists the hash validation function expects,
        # skipping noexec tasks (they have nothing to restore from cache).
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
            sq_hash.append(self.rqdata.runtaskentries[tid].hash)
            sq_taskname.append(taskname)
            sq_task.append(tid)
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
        try:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
            valid = bb.utils.better_eval(call, locs)
        # Handle version with no siginfo parameter
        except TypeError:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            valid = bb.utils.better_eval(call, locs)
        # valid holds indices into the parallel lists above.
        for v in valid:
            valid_new.add(sq_task[v])

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk: a tid with an invalid dependency is "found",
        # i.e. not a root cause, and its walk can stop early.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1638
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid tid, locate the closest previously-written
        signature file and print a human-readable diff explaining why the
        cached result could not be used."""

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                # Indent nested diff output under its parent entry.
                recout.extend(list('    ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            # Our own just-written siginfo must exist among the matches.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop our own file; compare against the most recent remaining one.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1673
class RunQueueExecute:
    """Base class for runqueue executors.

    Holds the shared bookkeeping (buildable/running/complete sets, stamp
    bookkeeping, failed tids) and the common worker-control operations.
    Subclasses provide self.stats and the scheduling logic.
    """

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Maximum number of concurrently running tasks and scheduler choice.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Task id sets tracking execution progress.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamps of in-flight tasks (task -> stamp, plus a parallel list).
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        self.stampcache = {}

        # Point the already-started worker pipes at this executor.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
             bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        """Handle a task exit reported by a worker; status 0 is success."""

        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        """Tell every worker to stop immediately and set the final state."""
        for mc in self.rq.worker:
            try:
                self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.worker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        for mc in self.rq.fakeworker:
            try:
                self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.fakeworker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """Graceful shutdown: wait for active tasks before settling state.

        Returns the active fd list while tasks are still running (caller
        should keep polling), True once the final state has been set.
        """
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        """Evaluate the metadata's dependency-validation hook for *task*.

        Returns the hook's verdict, or False when no hook is configured.
        Note: mutates *taskdeps* by adding *task* itself.
        """
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        valid = bb.utils.better_eval(call, locs)
        return valid

    def can_start_task(self):
        """Return True while fewer than BB_NUMBER_THREADS tasks are active."""
        can_start = self.stats.active < self.number_tasks
        return can_start
1772
class RunQueueExecuteDummy(RunQueueExecute):
    """Placeholder executor used during runqueue preparation.

    Does not call RunQueueExecute.__init__ (no workers or configuration are
    set up) and reports zero tasks; finish() just marks the queue complete.
    """

    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        self.rq.state = runQueueComplete
1781
1782class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        """Set up the real task executor: work out which tasks the setscene
        phase covered, honour the setsceneverify hook, and pick a scheduler."""
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Fixpoint: keep adding tasks whose every reverse dependency is
        # already covered, until nothing more can be added.
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    if tid in self.rq.scenequeue_notcovered:
                        continue
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        # Un-cover a task: delete its setscene stamp so it really runs.
        def removecoveredtask(tid):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskname = taskname + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        # Propagate removals: anything a removed task depends on (and that
        # wasn't covered before this pass started) must also run.
        toremove = covered_remove | self.rq.scenequeue_notcovered
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                if task in self.rq.scenequeue_covered:
                    removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)


        # Let UIs/classes know the stamps of the explicit build targets.
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))

            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Instantiate the scheduler selected by BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1884
1885 def get_schedulers(self):
1886 schedulers = set(obj for obj in globals().values()
1887 if type(obj) is type and
1888 issubclass(obj, RunQueueScheduler))
1889
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001890 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001891 if user_schedulers:
1892 for sched in user_schedulers.split():
1893 if not "." in sched:
1894 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1895 continue
1896
1897 modname, name = sched.rsplit(".", 1)
1898 try:
1899 module = __import__(modname, fromlist=(name,))
1900 except ImportError as exc:
1901 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1902 raise SystemExit(1)
1903 else:
1904 schedulers.add(getattr(module, name))
1905 return schedulers
1906
    def setbuildable(self, task):
        """Mark *task* ready to run, then notify the scheduler.
        (Order matters: the task is in runq_buildable before the scheduler
        hears about it.)"""
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001910
1911 def task_completeoutright(self, task):
1912 """
1913 Mark a task as completed
1914 Look at the reverse dependencies and mark any task with
1915 completed dependencies as buildable
1916 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001917 self.runq_complete.add(task)
1918 for revdep in self.rqdata.runtaskentries[task].revdeps:
1919 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001920 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001921 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001922 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001923 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001924 for dep in self.rqdata.runtaskentries[revdep].depends:
1925 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001926 alldeps = False
1927 break
1928 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001929 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001930 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001931
    def task_complete(self, task):
        """Record a successful task: update stats, fire the UI completion
        event, and propagate completion to reverse dependencies."""
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1936
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # Unless the continue-on-failure option disabled abort, stop
        # scheduling new tasks and move to cleanup.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1947
    def task_skip(self, task, reason):
        """Skip *task* for *reason* (e.g. "covered", "existing"): treat it as
        run and complete so reverse dependencies become buildable without it
        ever executing, and count it as both skipped and completed."""
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001955
1956 def execute(self):
1957 """
1958 Run the tasks in a queue prepared by rqdata.prepare()
1959 """
1960
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001961 if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001962 self.rqdata.setscenewhitelist_checked = True
1963
1964 # Check tasks that are going to run against the whitelist
1965 def check_norun_task(tid, showerror=False):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001966 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001967 # Ignore covered tasks
1968 if tid in self.rq.scenequeue_covered:
1969 return False
1970 # Ignore stamped tasks
1971 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
1972 return False
1973 # Ignore noexec tasks
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001974 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001975 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1976 return False
1977
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001978 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001979 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
1980 if showerror:
1981 if tid in self.rqdata.runq_setscene_tids:
1982 logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
1983 else:
1984 logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
1985 return True
1986 return False
1987 # Look to see if any tasks that we think shouldn't run are going to
1988 unexpected = False
1989 for tid in self.rqdata.runtaskentries:
1990 if check_norun_task(tid):
1991 unexpected = True
1992 break
1993 if unexpected:
1994 # Run through the tasks in the rough order they'd have executed and print errors
1995 # (since the order can be useful - usually missing sstate for the last few tasks
1996 # is the cause of the problem)
1997 task = self.sched.next()
1998 while task is not None:
1999 check_norun_task(task, showerror=True)
2000 self.task_skip(task, 'Setscene enforcement check')
2001 task = self.sched.next()
2002
2003 self.rq.state = runQueueCleanUp
2004 return True
2005
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002006 self.rq.read_workers()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002007
2008 if self.stats.total == 0:
2009 # nothing to do
2010 self.rq.state = runQueueCleanUp
2011
2012 task = self.sched.next()
2013 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002014 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002015
2016 if task in self.rq.scenequeue_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002017 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002018 self.task_skip(task, "covered")
2019 return True
2020
2021 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002022 logger.debug(2, "Stamp current task %s", task)
2023
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002024 self.task_skip(task, "existing")
2025 return True
2026
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002027 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002028 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2029 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2030 noexec=True)
2031 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002032 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002033 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002034 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002035 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002036 self.task_complete(task)
2037 return True
2038 else:
2039 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2040 bb.event.fire(startevent, self.cfgData)
2041
2042 taskdepdata = self.build_taskdepdata(task)
2043
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002044 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002045 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002046 if not mc in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002047 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002048 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002049 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002050 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002051 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002052 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002053 return True
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002054 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002055 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002056 else:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002057 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002058 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002059
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002060 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2061 self.build_stamps2.append(self.build_stamps[task])
2062 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002063 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002064 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002065 return True
2066
2067 if self.stats.active > 0:
2068 self.rq.read_workers()
2069 return self.rq.active_fds()
2070
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002071 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002072 self.rq.state = runQueueFailed
2073 return True
2074
2075 # Sanity Checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002076 for task in self.rqdata.runtaskentries:
2077 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002078 logger.error("Task %s never buildable!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002079 if task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002080 logger.error("Task %s never ran!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002081 if task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002082 logger.error("Task %s never completed!", task)
2083 self.rq.state = runQueueComplete
2084
2085 return True
2086
Andrew Geissler99467da2019-02-25 18:54:23 -06002087 def filtermcdeps(self, task, deps):
2088 ret = set()
2089 mainmc = mc_from_tid(task)
2090 for dep in deps:
2091 mc = mc_from_tid(dep)
2092 if mc != mainmc:
2093 continue
2094 ret.add(dep)
2095 return ret
2096
2097 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
2098 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002099 def build_taskdepdata(self, task):
2100 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002101 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002102 next.add(task)
Andrew Geissler99467da2019-02-25 18:54:23 -06002103 next = self.filtermcdeps(task, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002104 while next:
2105 additional = []
2106 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002107 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2108 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2109 deps = self.rqdata.runtaskentries[revdep].depends
2110 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002111 taskhash = self.rqdata.runtaskentries[revdep].hash
2112 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
Andrew Geissler99467da2019-02-25 18:54:23 -06002113 deps = self.filtermcdeps(task, deps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002114 for revdep2 in deps:
2115 if revdep2 not in taskdepdata:
2116 additional.append(revdep2)
2117 next = additional
2118
2119 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2120 return taskdepdata
2121
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Execution engine for the setscene (sstate acceleration) phase.

    Collapses the full runqueue dependency graph into one containing only
    setscene tasks, decides which are worth attempting (hash validation,
    existing stamps), runs them through the worker processes, and records
    the outcome in scenequeue_covered/scenequeue_notcovered so the main
    execution phase knows which real tasks can be skipped.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        # setscene succeeded -> the corresponding real task can be skipped
        self.scenequeue_covered = set()
        # setscene failed or unavailable -> the real task must run
        self.scenequeue_notcovered = set()
        # setscene skipped because no build target needs it
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene_tids) == 0:
            rq.scenequeue_covered = set()
            rq.scenequeue_notcovered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # Working structures for graph collapsing:
        # sq_revdeps: mutable copy of every task's reverse dependencies,
        #   pruned as chains are consumed
        # sq_revdeps_new: accumulates, per task, the setscene tids reachable
        #   through it (presumably the propagation buffer - see
        #   process_endpoints below)
        # sq_revdeps_squash: the final collapsed setscene-only graph
        sq_revdeps = {}
        sq_revdeps_new = {}
        sq_revdeps_squash = {}
        self.sq_harddeps = {}
        self.stamps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        self.rqdata.init_progress_reporter.next_stage()

        # First process the chains up to the first setscene task.
        endpoints = {}
        for tid in self.rqdata.runtaskentries:
            sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new[tid] = set()
            if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                #bb.warn("Added endpoint %s" % (tid))
                endpoints[tid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Secondly process the chains between setscene tasks.
        for tid in self.rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint 2 %s" % (tid))
            for dep in self.rqdata.runtaskentries[tid].depends:
                    if tid in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(tid)
                    if dep not in endpoints:
                        endpoints[dep] = set()
                    #bb.warn("  Added endpoint 3 %s" % (dep))
                    endpoints[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        def process_endpoints(endpoints):
            # Propagate the set of dependent setscene tids backwards through
            # each chain, stopping (and banking the result in sq_revdeps_new)
            # whenever a setscene task is reached.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                    continue
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        self.rqdata.init_progress_reporter.next_stage()

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = {}
        sq_revdeps_new2 = {}
        def process_endpoints2(endpoints):
            # Variant of process_endpoints which also seeds each endpoint
            # with itself ('set([point])'), so direct endpoints of the build
            # show up in sq_revdeps_new2 and can be marked unskippable.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for tid in self.rqdata.runtaskentries:
            sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new2[tid] = set()
            if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                endpoints2[tid] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for tid in self.rqdata.runq_setscene_tids:
            if sq_revdeps_new2[tid]:
                self.unskippable.append(tid)

        self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))

        # Transfer the collapsed reverse-dependency info into
        # sq_revdeps_squash; any non-setscene task still holding propagated
        # data at this point indicates a bug in the collapsing above.
        for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
            if tid in self.rqdata.runq_setscene_tids:
                deps = set()
                for dep in sq_revdeps_new[tid]:
                    deps.add(dep)
                sq_revdeps_squash[tid] = deps
            elif len(sq_revdeps_new[tid]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
            self.rqdata.init_progress_reporter.update(taskcounter)

        self.rqdata.init_progress_reporter.next_stage()

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for tid in self.rqdata.runq_setscene_tids:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
            for (depname, idependtask) in idepends:

                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                if deptid not in self.rqdata.runtaskentries:
                    bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

                if not deptid in self.sq_harddeps:
                    self.sq_harddeps[deptid] = set()
                self.sq_harddeps[deptid].add(tid)

                sq_revdeps_squash[tid].add(deptid)
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[deptid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Re-add the hard dependency edges into the squashed graph
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n   %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data

        # sq_deps is the forward view of the squashed graph; sq_revdeps2 is a
        # deep copy which scenequeue_updatecounters consumes destructively.
        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Tasks with no remaining reverse dependencies can run immediately
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        # Ask the configured hash validation function (BB_HASHCHECK_FUNCTION)
        # which setscene artefacts actually exist; everything else is failed
        # outright so the real tasks run instead.
        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_taskname.append(taskname)
                sq_task.append(tid)

            self.cooker.data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", len(stamppresent))

            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            self.cooker.data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")

            # NOTE: valid_new aliases (and appends to) stamppresent
            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
        """Remove 'task' from the pending reverse dependencies of everything
        depending on it, marking newly dependency-free tasks buildable.
        On failure, recursively propagate the failure through hard deps."""
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        """If setscene enforcement is active and this task is not
        whitelisted, a setscene failure is fatal: abort the queue."""
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        """Handle successful completion of a setscene task."""
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        """Handle a setscene task that ran but failed (the real task will
        be executed instead)."""
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        """Mark a setscene task as not runnable at all (e.g. no sstate
        artefact available) without firing a failure event."""
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        """Skip a setscene task, treating it as covered."""
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue

        Returns True if the loop should be called again, or the result of
        active_fds() while worker output is pending.
        """

        self.rq.read_workers()

        task = None
        if self.can_start_task():
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # Skip setscene tasks only needed by already-covered
                    # dependents, unless they are explicit build targets
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            # Dispatch the setscene task to the (fake)worker process
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.can_start_task():
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # All setscene tasks processed: publish results and hand over to
        # the main run phase
        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        # Delegate directly to the base implementation
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
        """Build the taskdepdata structure for a setscene task, using only
        the explicit setscene inter-task dependencies (idepends) rather
        than the full runqueue dependency graph."""
        def getsetscenedeps(tid):
            # Resolve the _setscene idepends of tid into runqueue tids
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
2563
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """

    def __init__(self, x):
        # Assign straight onto BaseException.args (coerced to a tuple)
        self.args = x
2570
2571
2572class runQueueExitWait(bb.event.Event):
2573 """
2574 Event when waiting for task processes to exit
2575 """
2576
2577 def __init__(self, remain):
2578 self.remain = remain
2579 self.message = "Waiting for %s active tasks to finish" % remain
2580 bb.event.Event.__init__(self)
2581
class runQueueEvent(bb.event.Event):
    """
    Base class for runQueue events, recording task identity, hash and stats
    """
    def __init__(self, task, stats, rq):
        bb.event.Event.__init__(self)
        # The tid serves as both the id and the display string
        self.taskid = self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Snapshot the stats so later queue activity doesn't mutate them
        self.stats = stats.copy()
2594
class sceneQueueEvent(runQueueEvent):
    """
    Base class for sceneQueue (setscene) events
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Re-point the identity fields at the _setscene variant of the task
        suffix = "_setscene"
        self.taskstring = task + suffix
        self.taskname = taskname_from_tid(task) + suffix
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002605
class runQueueTaskStarted(runQueueEvent):
    """
    Event fired when execution of a task begins
    """
    def __init__(self, task, stats, rq, noexec=False):
        # noexec flags a task started without actually executing its body
        self.noexec = noexec
        runQueueEvent.__init__(self, task, stats, rq)
2613
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event fired when execution of a setscene task begins
    """
    def __init__(self, task, stats, rq, noexec=False):
        # noexec flags a task started without actually executing its body
        self.noexec = noexec
        sceneQueueEvent.__init__(self, task, stats, rq)
2621
class runQueueTaskFailed(runQueueEvent):
    """
    Event fired when a task exits unsuccessfully
    """
    def __init__(self, task, stats, exitcode, rq):
        # Exit status reported by the worker for this task
        self.exitcode = exitcode
        runQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        return "Task ({0}) failed with exit code '{1}'".format(self.taskstring, self.exitcode)
2632
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event fired when a setscene task exits unsuccessfully
    """
    def __init__(self, task, stats, exitcode, rq):
        # Exit status reported by the worker for this setscene task
        self.exitcode = exitcode
        sceneQueueEvent.__init__(self, task, stats, rq)

    def __str__(self):
        return "Setscene task ({0}) failed with exit code '{1}' - real task will be run instead".format(self.taskstring, self.exitcode)
2643
class sceneQueueComplete(sceneQueueEvent):
    """
    Event fired once every task in the scene queue has been processed
    """
    def __init__(self, stats, rq):
        # Deliberately bypasses sceneQueueEvent.__init__: completion of the
        # whole queue has no single associated task id.
        bb.event.Event.__init__(self)
        self.stats = stats.copy()
2651
class runQueueTaskCompleted(runQueueEvent):
    """
    Event fired when a task finishes successfully
    """
2656
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event fired when a setscene task finishes successfully
    """
2661
class runQueueTaskSkipped(runQueueEvent):
    """
    Event fired when a task is skipped rather than executed
    """
    def __init__(self, task, stats, rq, reason):
        # Why the task was skipped (e.g. covered by setscene)
        self.reason = reason
        runQueueEvent.__init__(self, task, stats, rq)
2669
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        """
        pipein:  readable end carrying worker output to the server
        pipeout: worker-side end; closed here since the server never writes it
        d:       datastore passed along when re-firing worker events
        rq:      owning runqueue, used to monitor worker process health
        rqexec:  execution object notified of task exit statuses
        """
        self.input = pipein
        if pipeout:
            pipeout.close()
        # Reads must never block the server loop
        bb.utils.nonblockingfd(self.input)
        # Raw bytes received but not yet parsed into complete messages
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Swap the executor that receives waitpid notifications; the pipe
        # itself outlives any single executor.
        self.rqexec = rqexec

    def read(self):
        """
        Drain whatever data is available on the pipe and dispatch every
        complete message found. Returns True if any new bytes arrived.

        The stream consists of pickled payloads framed as
        <event>...</event> (re-fired as bitbake events) and
        <exitcode>...</exitcode> ((task, status) tuples).
        """
        # Sanity-check the worker processes first: a worker exiting while
        # no teardown is in progress means something went badly wrong.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data was ready on the non-blocking fd
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep scanning until a full pass extracts nothing further
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            # Slice offsets 7 and index+8 correspond to len(b"<event>")
            # and skipping past b"</event>" respectively
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            # Offsets 10 and index+11 correspond to len(b"<exitcode>")
            # and skipping past b"</exitcode>"
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        """Drain and dispatch all pending messages, then close the pipe."""
        while self.read():
            continue
        # Anything left over is an incomplete frame from the worker
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002733
def get_setscene_enforce_whitelist(d):
    """
    Return the list of pn:task whitelist entries exempt from setscene
    enforcement, or None when BB_SETSCENE_ENFORCE is not set to '1'.

    Entries of the form '%:<task>' are expanded into one entry per
    non-option command-line target, using the target's pn part.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    for item in whitelist:
        if not item.startswith('%:'):
            outlist.append(item)
            continue
        # Expand the '%' wildcard against each command-line target
        taskpart = item.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                outlist.append(target.split(':')[0] + ':' + taskpart)
    return outlist
2747
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if pn:taskname may be executed for real under setscene
    enforcement: either no whitelist is in force (None), or the task
    matches one of the fnmatch-style whitelist patterns.
    """
    import fnmatch
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)