blob: 329cda33a4feeaa5c33b3dde83a5d75e49a43c30 [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001#!/usr/bin/env python
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4"""
5BitBake 'RunQueue' implementation
6
7Handles preparation and execution of a queue of tasks
8"""
9
10# Copyright (C) 2006-2007 Richard Purdie
11#
12# This program is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 2 as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License along
22# with this program; if not, write to the Free Software Foundation, Inc.,
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25import copy
26import os
27import sys
28import signal
29import stat
30import fcntl
31import errno
32import logging
33import re
34import bb
35from bb import msg, data, event
36from bb import monitordisk
37import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060038import pickle
Brad Bishop6e60e8b2018-02-01 10:27:11 -050039from multiprocessing import Process
Brad Bishop19323692019-04-05 15:28:33 -040040import shlex
Patrick Williamsc124f4f2015-09-15 14:41:29 -050041
# Module-level loggers: "BitBake" is the root bitbake logger, the child
# "BitBake.RunQueue" is used for all runqueue-specific messages below.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a bare 64-hex-digit (sha256-sized) token, case-insensitively.
# The lookarounds ensure the match is not embedded in a longer
# alphanumeric run.
__find_sha256__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{64}(?![a-z0-9])' )
Patrick Williamsc124f4f2015-09-15 14:41:29 -050046
def fn_from_tid(tid):
    """Return the filename part of a task id of the form "<fn>:<taskname>"."""
    parts = tid.rsplit(":", 1)
    return parts[0]
49
def taskname_from_tid(tid):
    """Return the task name part of a task id of the form "<fn>:<taskname>"."""
    parts = tid.rsplit(":", 1)
    return parts[1]
52
def mc_from_tid(tid):
    """Return the multiconfig name embedded in *tid*, or "" for the default config."""
    if not tid.startswith('multiconfig:'):
        return ""
    return tid.split(':')[1]
57
def split_tid(tid):
    """Split *tid* into (mc, fn, taskname), dropping the mcfn element."""
    mc, fn, taskname, _mcfn = split_tid_mcfn(tid)
    return (mc, fn, taskname)
61
def split_tid_mcfn(tid):
    """Decompose a task id into its (mc, fn, taskname, mcfn) parts.

    Multiconfig ids look like "multiconfig:<mc>:<fn>:<taskname>"; plain
    ids are "<fn>:<taskname>", in which case mc is "" and mcfn == fn.
    """
    if tid.startswith('multiconfig:'):
        pieces = tid.split(':')
        mc = pieces[1]
        taskname = pieces[-1]
        # fn may itself contain colons, so rejoin everything in between.
        fn = ":".join(pieces[2:-1])
        mcfn = "multiconfig:" + mc + ":" + fn
    else:
        parts = tid.rsplit(":", 1)
        mc = ""
        fn = parts[0]
        taskname = parts[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)
77
def build_tid(mc, fn, taskname):
    """Assemble a task id from its components; inverse of split_tid_mcfn()."""
    if not mc:
        return fn + ":" + taskname
    return "multiconfig:" + mc + ":" + fn + ":" + taskname
82
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        """Start with *total* tasks expected and every counter at zero."""
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a new RunQueueStats carrying the current counter values."""
        dup = self.__class__(self.total)
        dup.__dict__.update(self.__dict__)
        return dup

    def taskFailed(self):
        """Record one active task finishing with a failure."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self):
        """Record one active task finishing successfully."""
        self.active -= 1
        self.completed += 1

    def taskSkipped(self):
        # NOTE(review): a skipped task also bumps the active count —
        # presumably the caller follows up with taskCompleted(); confirm.
        self.active += 1
        self.skipped += 1

    def taskActive(self):
        """Record one task starting to run."""
        self.active += 1
113
# These values indicate the next step due to be run in the
# runQueue state machine
# NOTE(review): the per-state meanings are inferred from the names; the
# state machine itself lives further down this file — confirm there.
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
124
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Fix: materialise the keys view into a real list. The previous
        # code was "[self.rqdata.runtaskentries.keys()]", i.e. a
        # one-element list containing a dict_keys view, which broke
        # prio_map.index(tid) in next_buildable_task() and the
        # enumerate() in dump_prio() for this basic scheduler.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        # Per-taskname cache of the "number_threads" varflag (throttle limit).
        self.skip_maxthread = {}
        # tid -> stamp file path, used to avoid running two tasks which
        # would write the same stamp at once.
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Lazily built inverse of prio_map (tid -> priority index).
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop anything that has started running since we last looked.
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

        # Fast path: a single candidate needs no priority search.
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        # Pick the buildable task with the lowest priority index whose
        # stamp is not already being written by a running task.
        best = None
        bestprio = None
        for tid in self.buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        # Implicitly returns None when the runqueue is at its task limit.
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        """Notify the scheduler that *task* has become buildable."""
        self.buildable.append(task)

    def describe_task(self, taskid):
        """Return a short human-readable description of *taskid* for debug output."""
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        """Dump the current priority map to the debug log, labelled with *comment*."""
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))
225
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket the task ids by their weight.
        by_weight = {}
        for tid in self.rqdata.runtaskentries:
            by_weight.setdefault(self.rqdata.runtaskentries[tid].weight, []).append(tid)

        # Flatten in ascending weight order, then reverse so the
        # heaviest tasks come first in the priority map.
        ordered = []
        for w in sorted(by_weight):
            ordered.extend(by_weight[w])
        ordered.reverse()
        self.prio_map = ordered
252
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the after.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        """Build the speed-sorted map, then regroup it per-recipe/per-task-kind."""
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceeding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s'  % all_tasks)

        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more imporant (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's task that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            # Stable-partition: pull every tid of this task kind up to
            # the front section of prio_map, preserving recipe order.
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500349
class RunTaskEntry(object):
    """Per-task bookkeeping record held in RunQueueData.runtaskentries."""

    def __init__(self):
        # Forward and reverse dependency sets of task ids.
        self.depends = set()
        self.revdeps = set()
        # Signature hashes; populated later (None until then).
        self.hash = None
        self.unihash = None
        self.task = None
        self.weight = 1
358
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500359class RunQueueData:
360 """
361 BitBake Run Queue implementation
362 """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        """
        Capture the inputs needed to compute the runqueue task graph.

        rq: owning RunQueue; cooker: global cooker state; cfgData: the
        configuration datastore; dataCaches: per-multiconfig recipe caches;
        taskData: resolved providers/tasks; targets: requested build targets.
        """
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        # Configuration-driven policy settings.
        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        # NOTE(review): dummy progress reporter by default — presumably
        # replaced by a real one when a UI is attached; confirm with callers.
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()
379
    def reset(self):
        # Discard any previously computed task graph; prepare() rebuilds it.
        self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500382
383 def runq_depends_names(self, ids):
384 import re
385 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600386 for id in ids:
387 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500388 nam = re.sub("_[^,]*,", ",", nam)
389 ret.extend([nam])
390 return ret
391
    def get_task_hash(self, tid):
        # Return the stored signature hash for *tid* (KeyError if unknown).
        return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500394
    def get_task_unihash(self, tid):
        # Return the stored unified hash for *tid* (KeyError if unknown).
        return self.runtaskentries[tid].unihash
397
    def get_user_idstring(self, tid, task_name_suffix = ""):
        # The tid is already the user-visible identifier; just append the suffix.
        return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500400
    def get_short_user_idstring(self, task, task_name_suffix = ""):
        # Shorter "pn:taskname" form of the id, using the recipe name (pn)
        # from the data cache instead of the full recipe filename.
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)
406
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        Returns a list of message strings describing up to 10 distinct loops.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        # Used to bail out of the recursive search once enough loops are found.
        class TooManyLoops(Exception):
            pass

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk over reverse dependencies; a revdep already
            # present in prev_chain means we closed a loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        raise TooManyLoops
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    # Re-scan if anything on the current chain reaches revdep's
                    # already-explored dependency set.
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    # Copy the chain so sibling branches are not polluted.
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        try:
            for task in tasks:
                find_chains(task, [])
        except TooManyLoops:
            pass

        return msgs
499
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that are not
        possible to execute due to circular dependencies.

        endpoints: task ids with no reverse dependencies (graph sinks).
        Returns the tid -> weight mapping; aborts the build via
        bb.msg.fatal() if unbuildable (circular) tasks are detected.
        """

        # NOTE(review): numTasks is computed but unused below.
        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        # Seed the sinks with a higher base weight.
        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        # Propagate weights backwards through the graph: a task's weight
        # accumulates the weights of everything that (transitively) needs it.
        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight
559
560 def prepare(self):
561 """
562 Turn a set of taskData into a RunQueue and compute data needed
563 to optimise the execution order.
564 """
565
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600566 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500567 recursivetasks = {}
568 recursiveitasks = {}
569 recursivetasksselfref = set()
570
571 taskData = self.taskData
572
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600573 found = False
574 for mc in self.taskData:
575 if len(taskData[mc].taskentries) > 0:
576 found = True
577 break
578 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500579 # Nothing to do
580 return 0
581
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600582 self.init_progress_reporter.start()
583 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500584
585 # Step A - Work out a list of tasks to run
586 #
587 # Taskdata gives us a list of possible providers for every build and run
588 # target ordered by priority. It also gives information on each of those
589 # providers.
590 #
591 # To create the actual list of tasks to execute we fix the list of
592 # providers and then resolve the dependencies into task IDs. This
593 # process is repeated for each type of dependency (tdepends, deptask,
594 # rdeptast, recrdeptask, idepends).
595
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 def add_build_dependencies(depids, tasknames, depends, mc):
597 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500598 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600599 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500600 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600601 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500602 if depdata is None:
603 continue
604 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600605 t = depdata + ":" + taskname
606 if t in taskData[mc].taskentries:
607 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500608
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600609 def add_runtime_dependencies(depids, tasknames, depends, mc):
610 for depname in depids:
611 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500612 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600613 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500614 if depdata is None:
615 continue
616 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600617 t = depdata + ":" + taskname
618 if t in taskData[mc].taskentries:
619 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500620
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800621 def add_mc_dependencies(mc, tid):
622 mcdeps = taskData[mc].get_mcdepends()
623 for dep in mcdeps:
624 mcdependency = dep.split(':')
625 pn = mcdependency[3]
626 frommc = mcdependency[1]
627 mcdep = mcdependency[2]
628 deptask = mcdependency[4]
629 if mc == frommc:
630 fn = taskData[mcdep].build_targets[pn][0]
631 newdep = '%s:%s' % (fn,deptask)
632 taskData[mc].taskentries[tid].tdepends.append(newdep)
633
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600634 for mc in taskData:
635 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500636
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600637 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
638 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500639
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600640 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
641
642 depends = set()
643 task_deps = self.dataCaches[mc].task_deps[taskfn]
644
645 self.runtaskentries[tid] = RunTaskEntry()
646
647 if fn in taskData[mc].failed_fns:
648 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500649
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800650 # We add multiconfig dependencies before processing internal task deps (tdepends)
651 if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
652 add_mc_dependencies(mc, tid)
653
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500654 # Resolve task internal dependencies
655 #
656 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600657 for t in taskData[mc].taskentries[tid].tdepends:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -0800658 (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
659 depends.add(build_tid(depmc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500660
661 # Resolve 'deptask' dependencies
662 #
663 # e.g. do_sometask[deptask] = "do_someothertask"
664 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600665 if 'deptask' in task_deps and taskname in task_deps['deptask']:
666 tasknames = task_deps['deptask'][taskname].split()
667 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668
669 # Resolve 'rdeptask' dependencies
670 #
671 # e.g. do_sometask[rdeptask] = "do_someothertask"
672 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600673 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
674 tasknames = task_deps['rdeptask'][taskname].split()
675 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500676
677 # Resolve inter-task dependencies
678 #
679 # e.g. do_sometask[depends] = "targetname:do_someothertask"
680 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600681 idepends = taskData[mc].taskentries[tid].idepends
682 for (depname, idependtask) in idepends:
683 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500684 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600685 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500686 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600687 t = depdata + ":" + idependtask
688 depends.add(t)
689 if t not in taskData[mc].taskentries:
690 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
691 irdepends = taskData[mc].taskentries[tid].irdepends
692 for (depname, idependtask) in irdepends:
693 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500694 # Won't be in run_targets if ASSUME_PROVIDED
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500695 if not taskData[mc].run_targets[depname]:
696 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600697 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600699 t = depdata + ":" + idependtask
700 depends.add(t)
701 if t not in taskData[mc].taskentries:
702 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500703
704 # Resolve recursive 'recrdeptask' dependencies (Part A)
705 #
706 # e.g. do_sometask[recrdeptask] = "do_someothertask"
707 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
708 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600709 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
710 tasknames = task_deps['recrdeptask'][taskname].split()
711 recursivetasks[tid] = tasknames
712 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
713 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
714 if taskname in tasknames:
715 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500716
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600717 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
718 recursiveitasks[tid] = []
719 for t in task_deps['recideptask'][taskname].split():
720 newdep = build_tid(mc, fn, t)
721 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500722
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600723 self.runtaskentries[tid].depends = depends
Brad Bishop316dfdd2018-06-25 12:45:53 -0400724 # Remove all self references
725 self.runtaskentries[tid].depends.discard(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500726
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600727 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500728
Brad Bishop316dfdd2018-06-25 12:45:53 -0400729 self.init_progress_reporter.next_stage()
730
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500731 # Resolve recursive 'recrdeptask' dependencies (Part B)
732 #
733 # e.g. do_sometask[recrdeptask] = "do_someothertask"
734 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600735 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600736
Brad Bishop316dfdd2018-06-25 12:45:53 -0400737 # Generating/interating recursive lists of dependencies is painful and potentially slow
738 # Precompute recursive task dependencies here by:
739 # a) create a temp list of reverse dependencies (revdeps)
740 # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
741 # c) combine the total list of dependencies in cumulativedeps
742 # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500743
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500744
Brad Bishop316dfdd2018-06-25 12:45:53 -0400745 revdeps = {}
746 deps = {}
747 cumulativedeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600748 for tid in self.runtaskentries:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400749 deps[tid] = set(self.runtaskentries[tid].depends)
750 revdeps[tid] = set()
751 cumulativedeps[tid] = set()
752 # Generate a temp list of reverse dependencies
753 for tid in self.runtaskentries:
754 for dep in self.runtaskentries[tid].depends:
755 revdeps[dep].add(tid)
756 # Find the dependency chain endpoints
757 endpoints = set()
758 for tid in self.runtaskentries:
759 if len(deps[tid]) == 0:
760 endpoints.add(tid)
761 # Iterate the chains collating dependencies
762 while endpoints:
763 next = set()
764 for tid in endpoints:
765 for dep in revdeps[tid]:
766 cumulativedeps[dep].add(fn_from_tid(tid))
767 cumulativedeps[dep].update(cumulativedeps[tid])
768 if tid in deps[dep]:
769 deps[dep].remove(tid)
770 if len(deps[dep]) == 0:
771 next.add(dep)
772 endpoints = next
773 #for tid in deps:
774 # if len(deps[tid]) != 0:
775 # bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
776
777 # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
778 # resolve these recursively until we aren't adding any further extra dependencies
779 extradeps = True
780 while extradeps:
781 extradeps = 0
782 for tid in recursivetasks:
783 tasknames = recursivetasks[tid]
784
785 totaldeps = set(self.runtaskentries[tid].depends)
786 if tid in recursiveitasks:
787 totaldeps.update(recursiveitasks[tid])
788 for dep in recursiveitasks[tid]:
789 if dep not in self.runtaskentries:
790 continue
791 totaldeps.update(self.runtaskentries[dep].depends)
792
793 deps = set()
794 for dep in totaldeps:
795 if dep in cumulativedeps:
796 deps.update(cumulativedeps[dep])
797
798 for t in deps:
799 for taskname in tasknames:
800 newtid = t + ":" + taskname
801 if newtid == tid:
802 continue
803 if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
804 extradeps += 1
805 self.runtaskentries[tid].depends.add(newtid)
806
807 # Handle recursive tasks which depend upon other recursive tasks
808 deps = set()
809 for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
810 deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
811 for newtid in deps:
812 for taskname in tasknames:
813 if not newtid.endswith(":" + taskname):
814 continue
815 if newtid in self.runtaskentries:
816 extradeps += 1
817 self.runtaskentries[tid].depends.add(newtid)
818
819 bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
820
821 # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
822 for tid in recursivetasksselfref:
823 self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600824
825 self.init_progress_reporter.next_stage()
826
827 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500828
829 # Step B - Mark all active tasks
830 #
831 # Start with the tasks we were asked to run and mark all dependencies
832 # as active too. If the task is to be 'forced', clear its stamp. Once
833 # all active tasks are marked, prune the ones we don't need.
834
835 logger.verbose("Marking Active Tasks")
836
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600837 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500838 """
839 Mark an item as active along with its depends
840 (calls itself recursively)
841 """
842
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600843 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500844 return
845
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600846 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500847
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600848 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500849 for depend in depends:
850 mark_active(depend, depth+1)
851
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600852 self.target_tids = []
853 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500854
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600855 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500856 continue
857
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600858 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500859 continue
860
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500861 parents = False
862 if task.endswith('-'):
863 parents = True
864 task = task[:-1]
865
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600866 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500867 continue
868
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600869 # fn already has mc prefix
870 tid = fn + ":" + task
871 self.target_tids.append(tid)
872 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500873 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600874 tasks = []
875 for x in taskData[mc].taskentries:
876 if x.startswith(fn + ":"):
877 tasks.append(taskname_from_tid(x))
878 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500879 if close_matches:
880 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
881 else:
882 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600883 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
884
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500885 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500886 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600887 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500888 mark_active(i, 1)
889 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600890 mark_active(tid, 1)
891
892 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500893
894 # Step C - Prune all inactive tasks
895 #
896 # Once all active tasks are marked, prune the ones we don't need.
897
Brad Bishop316dfdd2018-06-25 12:45:53 -0400898 delcount = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600899 for tid in list(self.runtaskentries.keys()):
900 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400901 delcount[tid] = self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600902 del self.runtaskentries[tid]
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600903
Brad Bishop316dfdd2018-06-25 12:45:53 -0400904 # Handle --runall
905 if self.cooker.configuration.runall:
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500906 # re-run the mark_active and then drop unused tasks from new list
907 runq_build = {}
Brad Bishop316dfdd2018-06-25 12:45:53 -0400908
909 for task in self.cooker.configuration.runall:
910 runall_tids = set()
911 for tid in list(self.runtaskentries):
912 wanttid = fn_from_tid(tid) + ":do_%s" % task
913 if wanttid in delcount:
914 self.runtaskentries[wanttid] = delcount[wanttid]
915 if wanttid in self.runtaskentries:
916 runall_tids.add(wanttid)
917
918 for tid in list(runall_tids):
919 mark_active(tid,1)
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500920
921 for tid in list(self.runtaskentries.keys()):
922 if tid not in runq_build:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400923 delcount[tid] = self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500924 del self.runtaskentries[tid]
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500925
926 if len(self.runtaskentries) == 0:
Brad Bishop316dfdd2018-06-25 12:45:53 -0400927 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
928
929 self.init_progress_reporter.next_stage()
930
931 # Handle runonly
932 if self.cooker.configuration.runonly:
933 # re-run the mark_active and then drop unused tasks from new list
934 runq_build = {}
935
936 for task in self.cooker.configuration.runonly:
937 runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
938
939 for tid in list(runonly_tids):
940 mark_active(tid,1)
941
942 for tid in list(self.runtaskentries.keys()):
943 if tid not in runq_build:
944 delcount[tid] = self.runtaskentries[tid]
945 del self.runtaskentries[tid]
946
947 if len(self.runtaskentries) == 0:
948 bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
Brad Bishop6e60e8b2018-02-01 10:27:11 -0500949
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500950 #
951 # Step D - Sanity checks and computation
952 #
953
954 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600955 if len(self.runtaskentries) == 0:
956 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500957 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
958 else:
959 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
960
Brad Bishop316dfdd2018-06-25 12:45:53 -0400961 logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500962
963 logger.verbose("Assign Weightings")
964
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600965 self.init_progress_reporter.next_stage()
966
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500967 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600968 for tid in self.runtaskentries:
969 for dep in self.runtaskentries[tid].depends:
970 self.runtaskentries[dep].revdeps.add(tid)
971
972 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500973
974 # Identify tasks at the end of dependency chains
975 # Error on circular dependency loops (length two)
976 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600977 for tid in self.runtaskentries:
978 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500979 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600980 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500981 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600982 if dep in self.runtaskentries[tid].depends:
983 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
984
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500985
986 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
987
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600988 self.init_progress_reporter.next_stage()
989
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500990 # Calculate task weights
991 # Check of higher length circular dependencies
992 self.runq_weight = self.calculate_task_weights(endpoints)
993
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600994 self.init_progress_reporter.next_stage()
995
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500996 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600997 for mc in self.dataCaches:
998 prov_list = {}
999 seen_fn = []
1000 for tid in self.runtaskentries:
1001 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1002 if taskfn in seen_fn:
1003 continue
1004 if mc != tidmc:
1005 continue
1006 seen_fn.append(taskfn)
1007 for prov in self.dataCaches[mc].fn_provides[taskfn]:
1008 if prov not in prov_list:
1009 prov_list[prov] = [taskfn]
1010 elif taskfn not in prov_list[prov]:
1011 prov_list[prov].append(taskfn)
1012 for prov in prov_list:
1013 if len(prov_list[prov]) < 2:
1014 continue
1015 if prov in self.multi_provider_whitelist:
1016 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001017 seen_pn = []
1018 # If two versions of the same PN are being built its fatal, we don't support it.
1019 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001020 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001021 if pn not in seen_pn:
1022 seen_pn.append(pn)
1023 else:
1024 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001025 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
1026 #
1027 # Construct a list of things which uniquely depend on each provider
1028 # since this may help the user figure out which dependency is triggering this warning
1029 #
1030 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
1031 deplist = {}
1032 commondeps = None
1033 for provfn in prov_list[prov]:
1034 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001035 for tid in self.runtaskentries:
1036 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001037 if fn != provfn:
1038 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001039 for dep in self.runtaskentries[tid].revdeps:
1040 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001041 if fn == provfn:
1042 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001043 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001044 if not commondeps:
1045 commondeps = set(deps)
1046 else:
1047 commondeps &= deps
1048 deplist[provfn] = deps
1049 for provfn in deplist:
1050 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
1051 #
1052 # Construct a list of provides and runtime providers for each recipe
1053 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
1054 #
1055 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
1056 provide_results = {}
1057 rprovide_results = {}
1058 commonprovs = None
1059 commonrprovs = None
1060 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001061 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001062 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001063 for rprovide in self.dataCaches[mc].rproviders:
1064 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001065 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001066 for package in self.dataCaches[mc].packages:
1067 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001068 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001069 for package in self.dataCaches[mc].packages_dynamic:
1070 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001071 rprovides.add(package)
1072 if not commonprovs:
1073 commonprovs = set(provides)
1074 else:
1075 commonprovs &= provides
1076 provide_results[provfn] = provides
1077 if not commonrprovs:
1078 commonrprovs = set(rprovides)
1079 else:
1080 commonrprovs &= rprovides
1081 rprovide_results[provfn] = rprovides
1082 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
1083 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
1084 for provfn in prov_list[prov]:
1085 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
1086 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
1087
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001088 if self.warn_multi_bb:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001089 logger.verbnote(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001090 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001091 logger.error(msg)
1092
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001093 self.init_progress_reporter.next_stage()
1094
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001095 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001096 self.stampfnwhitelist = {}
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001097 for mc in self.taskData:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001098 self.stampfnwhitelist[mc] = []
1099 for entry in self.stampwhitelist.split():
1100 if entry not in self.taskData[mc].build_targets:
1101 continue
1102 fn = self.taskData.build_targets[entry][0]
1103 self.stampfnwhitelist[mc].append(fn)
1104
1105 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001106
1107 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001108 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001109 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001110 for tid in self.runtaskentries:
1111 (mc, fn, taskname, _) = split_tid_mcfn(tid)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001112 setscenetid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001113 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001114 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001115 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001116
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001117 def invalidate_task(tid, error_nostamp):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001118 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1119 taskdep = self.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001120 if fn + ":" + taskname not in taskData[mc].taskentries:
1121 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001122 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1123 if error_nostamp:
1124 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
1125 else:
1126 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
1127 else:
1128 logger.verbose("Invalidate task %s, %s", taskname, fn)
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001129 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001130
1131 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132
1133 # Invalidate task if force mode active
1134 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001135 for tid in self.target_tids:
1136 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001137
1138 # Invalidate task if invalidate mode active
1139 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001140 for tid in self.target_tids:
1141 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142 for st in self.cooker.configuration.invalidate_stamp.split(','):
1143 if not st.startswith("do_"):
1144 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001145 invalidate_task(fn + ":" + st, True)
1146
1147 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001148
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001149 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001150 for mc in taskData:
1151 virtmap = taskData[mc].get_providermap(prefix="virtual/")
1152 virtpnmap = {}
1153 for v in virtmap:
1154 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
1155 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
1156 if hasattr(bb.parse.siggen, "tasks_resolved"):
1157 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
1158
1159 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -05001160
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001161 # Iterate over the task list and call into the siggen code
1162 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001163 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001164 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001165 for tid in todeal.copy():
1166 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
1167 dealtwith.add(tid)
1168 todeal.remove(tid)
Brad Bishop19323692019-04-05 15:28:33 -04001169 self.prepare_task_hash(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001170
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001171 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001172
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001173 #self.dump_data()
1174 return len(self.runtaskentries)
1175
Brad Bishop19323692019-04-05 15:28:33 -04001176 def prepare_task_hash(self, tid):
1177 procdep = []
1178 for dep in self.runtaskentries[tid].depends:
1179 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
1180 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1181 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
1182 self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(taskfn + "." + taskname)
1183
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001184 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001185 """
1186 Dump some debug information on the internal data structures
1187 """
1188 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001189 for tid in self.runtaskentries:
1190 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1191 self.runtaskentries[tid].weight,
1192 self.runtaskentries[tid].depends,
1193 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001194
class RunQueueWorker():
    """
    Lightweight container pairing a bitbake-worker subprocess with the
    runQueuePipe used to read its output.
    """
    def __init__(self, process, pipe):
        # subprocess.Popen handle for the worker process
        self.process = process
        # pipe attached to the worker's stdout
        self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001199
1200class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001201 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001202
1203 self.cooker = cooker
1204 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001205 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001206
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001207 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1208 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
1209 self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
1210 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001211
1212 self.state = runQueuePrepare
1213
1214 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001215 # Invoked at regular time intervals via the bitbake heartbeat event
1216 # while the build is running. We generate a unique name for the handler
1217 # here, just in case that there ever is more than one RunQueue instance,
1218 # start the handler when reaching runQueueSceneRun, and stop it when
1219 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001220 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001221 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1222 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001223 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001224 self.worker = {}
1225 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001226
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001227 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001228 logger.debug(1, "Starting bitbake-worker")
1229 magic = "decafbad"
1230 if self.cooker.configuration.profile:
1231 magic = "decafbadbad"
1232 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001233 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001234 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop19323692019-04-05 15:28:33 -04001235 fakerootcmd = shlex.split(mcdata.getVar("FAKEROOTCMD"))
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001236 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001237 env = os.environ.copy()
1238 for key, value in (var.split('=') for var in fakerootenv):
1239 env[key] = value
Brad Bishop19323692019-04-05 15:28:33 -04001240 worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001241 else:
1242 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1243 bb.utils.nonblockingfd(worker.stdout)
1244 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1245
1246 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001247 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1248 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1249 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1250 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001251 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001252 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1253 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1254 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1255 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1256 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001257 "buildname" : self.cfgData.getVar("BUILDNAME"),
1258 "date" : self.cfgData.getVar("DATE"),
1259 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001260 }
1261
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001262 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001263 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001264 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001265 worker.stdin.flush()
1266
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001267 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001268
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001269 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001270 if not worker:
1271 return
1272 logger.debug(1, "Teardown for bitbake-worker")
1273 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001274 worker.process.stdin.write(b"<quit></quit>")
1275 worker.process.stdin.flush()
1276 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001277 except IOError:
1278 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001279 while worker.process.returncode is None:
1280 worker.pipe.read()
1281 worker.process.poll()
1282 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001283 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001284 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001285
1286 def start_worker(self):
1287 if self.worker:
1288 self.teardown_workers()
1289 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001290 for mc in self.rqdata.dataCaches:
1291 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001292
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001293 def start_fakeworker(self, rqexec, mc):
1294 if not mc in self.fakeworker:
1295 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001296
1297 def teardown_workers(self):
1298 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001299 for mc in self.worker:
1300 self._teardown_worker(self.worker[mc])
1301 self.worker = {}
1302 for mc in self.fakeworker:
1303 self._teardown_worker(self.fakeworker[mc])
1304 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001305
1306 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001307 for mc in self.worker:
1308 self.worker[mc].pipe.read()
1309 for mc in self.fakeworker:
1310 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001311
1312 def active_fds(self):
1313 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001314 for mc in self.worker:
1315 fds.append(self.worker[mc].pipe.input)
1316 for mc in self.fakeworker:
1317 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001318 return fds
1319
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001320 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001321 def get_timestamp(f):
1322 try:
1323 if not os.access(f, os.F_OK):
1324 return None
1325 return os.stat(f)[stat.ST_MTIME]
1326 except:
1327 return None
1328
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001329 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1330 if taskname is None:
1331 taskname = tn
1332
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001333 if self.stamppolicy == "perfile":
1334 fulldeptree = False
1335 else:
1336 fulldeptree = True
1337 stampwhitelist = []
1338 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001339 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001340
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001341 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001342
1343 # If the stamp is missing, it's not current
1344 if not os.access(stampfile, os.F_OK):
1345 logger.debug(2, "Stampfile %s not available", stampfile)
1346 return False
1347 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001348 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001349 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1350 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1351 return False
1352
1353 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1354 return True
1355
1356 if cache is None:
1357 cache = {}
1358
1359 iscurrent = True
1360 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001361 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001362 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001363 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1364 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1365 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001366 t2 = get_timestamp(stampfile2)
1367 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001368 if t3 and not t2:
1369 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001370 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001371 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001372 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1373 if not t2:
1374 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1375 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001376 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001377 if t1 < t2:
1378 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1379 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001380 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001381 if recurse and iscurrent:
1382 if dep in cache:
1383 iscurrent = cache[dep]
1384 if not iscurrent:
1385 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1386 else:
1387 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1388 cache[dep] = iscurrent
1389 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001390 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001391 return iscurrent
1392
Brad Bishop19323692019-04-05 15:28:33 -04001393 def validate_hash(self, *, sq_fn, sq_task, sq_hash, sq_hashfn, siginfo, sq_unihash, d):
1394 locs = {"sq_fn" : sq_fn, "sq_task" : sq_task, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn,
1395 "sq_unihash" : sq_unihash, "siginfo" : siginfo, "d" : d}
1396
1397 hashvalidate_args = ("(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo, sq_unihash=sq_unihash)",
1398 "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=siginfo)",
1399 "(sq_fn, sq_task, sq_hash, sq_hashfn, d)")
1400
1401 for args in hashvalidate_args[:-1]:
1402 try:
1403 call = self.hashvalidate + args
1404 return bb.utils.better_eval(call, locs)
1405 except TypeError:
1406 continue
1407
1408 # Call the last entry without a try...catch to propagate any thrown
1409 # TypeError
1410 call = self.hashvalidate + hashvalidate_args[-1]
1411 return bb.utils.better_eval(call, locs)
1412
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        One iteration of the runqueue state machine: called repeatedly by
        execute_runqueue() until it returns False (finished) or raises.
        """

        retval = True

        if self.state is runQueuePrepare:
            # Graph preparation stage; a dummy executor stands in so stats
            # queries work before anything real runs.
            self.rqexe = RunQueueExecuteDummy(self)
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            if self.rqdata.prepare() == 0:
                # Nothing to build at all.
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                self.rqdata.init_progress_reporter.next_stage()

                # we are ready to run, emit dependency info to any UI or class which
                # needs it
                depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
                self.rqdata.init_progress_reporter.next_stage()
                bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

        if self.state is runQueueSceneInit:
            # Register the disk monitor heartbeat handler once; the lambda
            # only performs the check while tasks are actually executing.
            if not self.dm_event_handler_registered:
                res = bb.event.register(self.dm_event_handler_name,
                                        lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
                                        ('bb.event.HeartbeatEvent',))
                self.dm_event_handler_registered = True

            dump = self.cooker.configuration.dump_signatures
            if dump:
                # Signature-dump mode: write signature data and finish
                # without executing any tasks.
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete
            else:
                self.rqdata.init_progress_reporter.next_stage()
                self.start_worker()
                self.rqdata.init_progress_reporter.next_stage()
                self.rqexe = RunQueueExecuteScenequeue(self)

        if self.state is runQueueSceneRun:
            # Setscene (from-cache) task execution.
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            if self.cooker.configuration.setsceneonly:
                self.state = runQueueComplete
            else:
                # Just in case we didn't setscene
                self.rqdata.init_progress_reporter.finish()
                logger.info("Executing RunQueue Tasks")
                self.rqexe = RunQueueExecuteTasks(self)
                self.state = runQueueRunning

        if self.state is runQueueRunning:
            # Real task execution.
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        build_done = self.state is runQueueComplete or self.state is runQueueFailed

        if build_done and self.dm_event_handler_registered:
            bb.event.remove(self.dm_event_handler_name, None)
            self.dm_event_handler_registered = False

        if build_done and self.rqexe:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1509
1510 def execute_runqueue(self):
1511 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1512 try:
1513 return self._execute_runqueue()
1514 except bb.runqueue.TaskFailure:
1515 raise
1516 except SystemExit:
1517 raise
1518 except bb.BBHandledException:
1519 try:
1520 self.teardown_workers()
1521 except:
1522 pass
1523 self.state = runQueueComplete
1524 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001525 except Exception as err:
1526 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001527 try:
1528 self.teardown_workers()
1529 except:
1530 pass
1531 self.state = runQueueComplete
1532 raise
1533
1534 def finish_runqueue(self, now = False):
1535 if not self.rqexe:
1536 self.state = runQueueComplete
1537 return
1538
1539 if now:
1540 self.rqexe.finish_now()
1541 else:
1542 self.rqexe.finish()
1543
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001544 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001545 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001546 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1547 siggen = bb.parse.siggen
1548 dataCaches = self.rqdata.dataCaches
1549 siggen.dump_sigfn(fn, dataCaches, options)
1550
1551 def dump_signatures(self, options):
1552 fns = set()
1553 bb.note("Reparsing files to collect dependency data")
1554
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001555 for tid in self.rqdata.runtaskentries:
1556 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001557 fns.add(fn)
1558
1559 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1560 # We cannot use the real multiprocessing.Pool easily due to some local data
1561 # that can't be pickled. This is a cheap multi-process solution.
1562 launched = []
1563 while fns:
1564 if len(launched) < max_process:
1565 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1566 p.start()
1567 launched.append(p)
1568 for q in launched:
1569 # The finished processes are joined when calling is_alive()
1570 if not q.is_alive():
1571 launched.remove(q)
1572 for p in launched:
1573 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001574
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001575 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001576
1577 return
1578
    def print_diffscenetasks(self):
        """Report which tasks cannot be reused from the setscene cache.

        Validates every task's hash against the sstate validation hook,
        then prints the "root" invalid tasks (those whose dependencies are
        all valid) and returns that set.
        """

        valid = []
        sq_hash = []
        sq_hashfn = []
        sq_unihash = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []
        valid_new = set()

        # Build parallel lists describing every executable task for the
        # hash validation call; noexec tasks are collected separately.
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
            sq_hash.append(self.rqdata.runtaskentries[tid].hash)
            sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
            sq_taskname.append(taskname)
            sq_task.append(tid)

        valid = self.validate_hash(sq_fn=sq_fn, sq_task=sq_taskname, sq_hash=sq_hash, sq_hashfn=sq_hashfn,
                siginfo=True, sq_unihash=sq_unihash, d=self.cooker.data)

        # validate_hash returns indices into the parallel lists.
        for v in valid:
            valid_new.add(sq_task[v])

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Everything neither validated nor noexec is invalid.
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Breadth-first walk of each invalid task's dependencies: a task
        # with an invalid dependency goes into 'found' (it is not a root
        # cause). NOTE: 'next' shadows the builtin of the same name.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Once tid is known to be non-root, stop walking early.
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        # Root-cause invalid tasks only.
        return invalidtasks.difference(found)
1659
    def write_diffscenetasks(self, invalidtasks):
        """For each invalid task, print why its cached signature differs.

        Finds the closest previously-written siginfo file for the task and
        prints a recursive comparison against the current signature.
        """

        # Define recursion callback
        def recursecb(key, hash1, hash2):
            # Compare the two sigdata files for *key*, indenting nested output.
            hashes = [hash1, hash2]
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list('  ' + l for l in out2))
            else:
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            h = self.rqdata.runtaskentries[tid].hash
            # All siginfo files ever written for this pn/taskname.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current hash, then pick the most recent of the rest.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                # Extract the previous hash from the siginfo filename.
                prevh = __find_sha256__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n  We need hash %s, closest matching task was %s\n  " % (pn, taskname, h, prevh) + '\n  '.join(output))
1694
class RunQueueExecute:
    """Base class for the runqueue execution phases.

    Holds state shared by the real-task and setscene executors: the
    buildable/running/complete task sets, failure bookkeeping, stamp
    caches and the worker pipe plumbing.
    """

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"

        # Per-tid execution state.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        self.stampcache = {}

        # Point every worker pipe back at this executor instance.
        for workerobj in rq.worker.values():
            workerobj.pipe.setrunqueueexec(self)
        for workerobj in rq.fakeworker.values():
            workerobj.pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        """Handle a worker's exit notification for *task* with exit code *status*."""
        # self.build_stamps[pid] may not exist when a shared work directory is used.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps.pop(task))

        if status == 0:
            self.task_complete(task)
        else:
            self.task_fail(task, status)
        return True

    def finish_now(self):
        """Tell every worker to stop immediately and set the final queue state."""
        for workers in (self.rq.worker, self.rq.fakeworker):
            for workerobj in workers.values():
                try:
                    workerobj.process.stdin.write(b"<finishnow></finishnow>")
                    workerobj.process.stdin.flush()
                except IOError:
                    # worker must have died?
                    pass

        if self.failed_tids:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """Request a graceful wind-down.

        While tasks are still active, keep the queue in the cleanup state
        and return the worker fds to poll; otherwise settle the final state.
        """
        self.rq.state = runQueueCleanUp

        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        self.rq.state = runQueueFailed if self.failed_tids else runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        """Ask the metadata's depvalidate hook whether *task* is really needed.

        NOTE: adds *task* into the caller-supplied *taskdeps* set, matching
        the historical behaviour.
        """
        if not self.rq.depvalidate:
            return False

        taskdeps.add(task)
        taskdata = {}
        for dep in taskdeps:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
            taskdata[dep] = [self.rqdata.dataCaches[mc].pkg_fn[taskfn], taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
        return bb.utils.better_eval(call, locs)

    def can_start_task(self):
        """True while fewer than BB_NUMBER_THREADS tasks are active."""
        return self.stats.active < self.number_tasks
1793
class RunQueueExecuteDummy(RunQueueExecute):
    # Placeholder executor installed while the runqueue is still being
    # prepared. Deliberately does NOT call RunQueueExecute.__init__ since
    # no workers or task data exist yet; it only provides empty stats.
    def __init__(self, rq):
        self.rq = rq
        self.stats = RunQueueStats(0)

    def finish(self):
        # Nothing to wind down; mark the queue complete immediately.
        self.rq.state = runQueueComplete
        return
1802
1803class RunQueueExecuteTasks(RunQueueExecute):
    def __init__(self, rq):
        """Set up real-task execution: decide which tasks the setscene
        phase already covered, honour the setsceneverify hook, and pick a
        scheduler."""
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        # Remember what setscene itself covered, before we extend the set.
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Fixpoint: keep adding tasks whose reverse dependencies are all
        # covered, until no more can be added.
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    if tid in self.rq.scenequeue_notcovered:
                        continue
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            # Collect the tasks whose stamps (setscene or normal) are not
            # current; the verify hook decides what to un-cover.
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        def removecoveredtask(tid):
            # Delete the setscene stamp and drop tid from the covered set
            # so the real task executes.
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskname = taskname + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        # Un-cover the removed tasks and, transitively, any covered task
        # they depend on (unless setscene itself covered it initially).
        toremove = covered_remove | self.rq.scenequeue_notcovered
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        while toremove:
            covered_remove = []
            for task in toremove:
                if task in self.rq.scenequeue_covered:
                    removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)


        # Tell the UIs which stamps belong to the requested targets.
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))

            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Select the scheduler named by BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'.  Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1905
1906 def get_schedulers(self):
1907 schedulers = set(obj for obj in globals().values()
1908 if type(obj) is type and
1909 issubclass(obj, RunQueueScheduler))
1910
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001911 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001912 if user_schedulers:
1913 for sched in user_schedulers.split():
1914 if not "." in sched:
1915 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1916 continue
1917
1918 modname, name = sched.rsplit(".", 1)
1919 try:
1920 module = __import__(modname, fromlist=(name,))
1921 except ImportError as exc:
1922 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1923 raise SystemExit(1)
1924 else:
1925 schedulers.add(getattr(module, name))
1926 return schedulers
1927
    def setbuildable(self, task):
        # Mark the task as ready to run and notify the scheduler so it can
        # account for it on its next selection pass.
        self.runq_buildable.add(task)
        self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001931
1932 def task_completeoutright(self, task):
1933 """
1934 Mark a task as completed
1935 Look at the reverse dependencies and mark any task with
1936 completed dependencies as buildable
1937 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001938 self.runq_complete.add(task)
1939 for revdep in self.rqdata.runtaskentries[task].revdeps:
1940 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001941 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001942 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001943 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001944 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001945 for dep in self.rqdata.runtaskentries[revdep].depends:
1946 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001947 alldeps = False
1948 break
1949 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001950 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001951 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001952
    def task_complete(self, task):
        # Successful completion: update stats, notify listeners, then ripple
        # buildability through the reverse dependencies.
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)
1957
    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # '' is the default multiconfig's taskData; its abort flag reflects
        # whether the build should stop on first failure.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp
1968
    def task_skip(self, task, reason):
        # Treat a skipped task as started and finished in one step so the
        # dependency/state bookkeeping stays consistent, while counting it
        # as skipped (and completed) in the stats.
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001976
    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()

        Called repeatedly by the run queue state machine; each call either
        dispatches one task to a worker, skips one task, or detects
        completion/failure.  Returns True (or the active fds when waiting)
        to indicate whether work remains.
        """

        # One-time check when setscene enforcement is active: any real task
        # about to execute which is not whitelisted is an error.
        if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
            self.rqdata.setscenewhitelist_checked = True

            # Check tasks that are going to run against the whitelist
            def check_norun_task(tid, showerror=False):
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                # Ignore covered tasks
                if tid in self.rq.scenequeue_covered:
                    return False
                # Ignore stamped tasks
                if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                    return False
                # Ignore noexec tasks
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    return False

                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                    if showerror:
                        if tid in self.rqdata.runq_setscene_tids:
                            logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
                        else:
                            logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
                    return True
                return False
            # Look to see if any tasks that we think shouldn't run are going to
            unexpected = False
            for tid in self.rqdata.runtaskentries:
                if check_norun_task(tid):
                    unexpected = True
                    break
            if unexpected:
                # Run through the tasks in the rough order they'd have executed and print errors
                # (since the order can be useful - usually missing sstate for the last few tasks
                # is the cause of the problem)
                task = self.sched.next()
                while task is not None:
                    check_norun_task(task, showerror=True)
                    self.task_skip(task, 'Setscene enforcement check')
                    task = self.sched.next()

                self.rq.state = runQueueCleanUp
                return True

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        # Ask the scheduler for the next buildable task.
        task = self.sched.next()
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            # Covered by a successful setscene task: no need to run it.
            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            # A current stamp means the task output already exists.
            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks are not dispatched to a worker; just stamp
                # them (unless dry-run / enforcement) and mark complete.
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                taskhash = self.rqdata.get_task_hash(task)
                unihash = self.rqdata.get_task_unihash(task)
                # fakeroot tasks go to the per-multiconfig fakeroot worker
                # (started on demand); everything else to the normal worker.
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
                    if not mc in self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self, mc)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

            # Record the stamp we expect this task to produce, so duplicate
            # dispatch against the same stamp can be avoided.
            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.can_start_task():
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
            if task not in self.runq_running:
                logger.error("Task %s never ran!", task)
            if task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True
2109
Andrew Geissler99467da2019-02-25 18:54:23 -06002110 def filtermcdeps(self, task, deps):
2111 ret = set()
2112 mainmc = mc_from_tid(task)
2113 for dep in deps:
2114 mc = mc_from_tid(dep)
2115 if mc != mainmc:
2116 continue
2117 ret.add(dep)
2118 return ret
2119
2120 # We filter out multiconfig dependencies from taskdepdata we pass to the tasks
2121 # as most code can't handle them
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002122 def build_taskdepdata(self, task):
2123 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002124 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002125 next.add(task)
Andrew Geissler99467da2019-02-25 18:54:23 -06002126 next = self.filtermcdeps(task, next)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002127 while next:
2128 additional = []
2129 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002130 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2131 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2132 deps = self.rqdata.runtaskentries[revdep].depends
2133 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002134 taskhash = self.rqdata.runtaskentries[revdep].hash
Brad Bishop19323692019-04-05 15:28:33 -04002135 unihash = self.rqdata.runtaskentries[revdep].unihash
Andrew Geissler99467da2019-02-25 18:54:23 -06002136 deps = self.filtermcdeps(task, deps)
Brad Bishop19323692019-04-05 15:28:33 -04002137 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002138 for revdep2 in deps:
2139 if revdep2 not in taskdepdata:
2140 additional.append(revdep2)
2141 next = additional
2142
2143 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2144 return taskdepdata
2145
class RunQueueExecuteScenequeue(RunQueueExecute):
    """
    Executor for the "setscene" (sstate acceleration) tasks.

    Collapses the full runqueue dependency graph down to one containing only
    the setscene tasks, runs them, and records which ordinary tasks they
    "cover" (make unnecessary).  The results are handed back via
    rq.scenequeue_covered / rq.scenequeue_notcovered before transitioning
    to the normal run queue.
    """
    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        # Setscene tasks which succeeded, which failed/were unavailable,
        # and which turned out not to be needed at all.
        self.scenequeue_covered = set()
        self.scenequeue_notcovered = set()
        self.scenequeue_notneeded = set()

        # If we don't have any setscene functions, skip this step
        if len(self.rqdata.runq_setscene_tids) == 0:
            rq.scenequeue_covered = set()
            rq.scenequeue_notcovered = set()
            rq.state = runQueueRunInit
            return

        self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))

        # sq_revdeps: remaining (unprocessed) reverse dependencies per tid.
        # sq_revdeps_new: accumulated setscene-only reverse dependencies.
        # sq_revdeps_squash: the final collapsed graph (setscene tids only).
        sq_revdeps = {}
        sq_revdeps_new = {}
        sq_revdeps_squash = {}
        self.sq_harddeps = {}
        self.stamps = {}

        # We need to construct a dependency graph for the setscene functions. Intermediate
        # dependencies between the setscene tasks only complicate the code. This code
        # therefore aims to collapse the huge runqueue dependency tree into a smaller one
        # only containing the setscene functions.

        self.rqdata.init_progress_reporter.next_stage()

        # First process the chains up to the first setscene task.
        endpoints = {}
        for tid in self.rqdata.runtaskentries:
            sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new[tid] = set()
            if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                #bb.warn("Added endpoint %s" % (tid))
                endpoints[tid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Secondly process the chains between setscene tasks.
        for tid in self.rqdata.runq_setscene_tids:
            #bb.warn("Added endpoint 2 %s" % (tid))
            for dep in self.rqdata.runtaskentries[tid].depends:
                if tid in sq_revdeps[dep]:
                    sq_revdeps[dep].remove(tid)
                if dep not in endpoints:
                    endpoints[dep] = set()
                #bb.warn("  Added endpoint 3 %s" % (dep))
                endpoints[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        def process_endpoints(endpoints):
            # Walk from each endpoint towards its dependencies, pushing the
            # accumulated set of setscene tasks down the graph.  Recurses on
            # any dependency whose reverse dependencies are exhausted.
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set()
                if task:
                    tasks |= task
                if sq_revdeps_new[point]:
                    tasks |= sq_revdeps_new[point]
                sq_revdeps_new[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    # Setscene tasks terminate the chain: they keep the
                    # accumulated set and nothing is pushed past them.
                    sq_revdeps_new[point] = tasks
                    tasks = set()
                    continue
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps[dep]:
                        sq_revdeps[dep].remove(point)
                    if tasks:
                        sq_revdeps_new[dep] |= tasks
                    if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = task
            if len(newendpoints) != 0:
                process_endpoints(newendpoints)

        process_endpoints(endpoints)

        self.rqdata.init_progress_reporter.next_stage()

        # Build a list of setscene tasks which are "unskippable"
        # These are direct endpoints referenced by the build
        endpoints2 = {}
        sq_revdeps2 = {}
        sq_revdeps_new2 = {}
        def process_endpoints2(endpoints):
            # Same walk as process_endpoints, but each point also includes
            # itself in the propagated set, so direct build targets reach
            # their nearest setscene tasks (used to compute unskippable).
            newendpoints = {}
            for point, task in endpoints.items():
                tasks = set([point])
                if task:
                    tasks |= task
                if sq_revdeps_new2[point]:
                    tasks |= sq_revdeps_new2[point]
                sq_revdeps_new2[point] = set()
                if point in self.rqdata.runq_setscene_tids:
                    sq_revdeps_new2[point] = tasks
                for dep in self.rqdata.runtaskentries[point].depends:
                    if point in sq_revdeps2[dep]:
                        sq_revdeps2[dep].remove(point)
                    if tasks:
                        sq_revdeps_new2[dep] |= tasks
                    if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
                        newendpoints[dep] = tasks
            if len(newendpoints) != 0:
                process_endpoints2(newendpoints)
        for tid in self.rqdata.runtaskentries:
            sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
            sq_revdeps_new2[tid] = set()
            if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
                endpoints2[tid] = set()
        process_endpoints2(endpoints2)
        self.unskippable = []
        for tid in self.rqdata.runq_setscene_tids:
            if sq_revdeps_new2[tid]:
                self.unskippable.append(tid)

        self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))

        # Transfer the collapsed reverse dependencies into the final graph;
        # only setscene tasks should have any accumulated entries left.
        for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
            if tid in self.rqdata.runq_setscene_tids:
                deps = set()
                for dep in sq_revdeps_new[tid]:
                    deps.add(dep)
                sq_revdeps_squash[tid] = deps
            elif len(sq_revdeps_new[tid]) != 0:
                bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
            self.rqdata.init_progress_reporter.update(taskcounter)

        self.rqdata.init_progress_reporter.next_stage()

        # Resolve setscene inter-task dependencies
        # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
        # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
        for tid in self.rqdata.runq_setscene_tids:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
            for (depname, idependtask) in idepends:

                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                if deptid not in self.rqdata.runtaskentries:
                    bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))

                if not deptid in self.sq_harddeps:
                    self.sq_harddeps[deptid] = set()
                self.sq_harddeps[deptid].add(tid)

                sq_revdeps_squash[tid].add(deptid)
                # Have to zero this to avoid circular dependencies
                sq_revdeps_squash[deptid] = set()

        self.rqdata.init_progress_reporter.next_stage()

        # Re-add the hard dependencies as forward edges in the squashed graph.
        for task in self.sq_harddeps:
            for dep in self.sq_harddeps[task]:
                sq_revdeps_squash[dep].add(task)

        self.rqdata.init_progress_reporter.next_stage()

        #for tid in sq_revdeps_squash:
        #    for dep in sq_revdeps_squash[tid]:
        #        data = data + "\n   %s" % dep
        #    bb.warn("Task %s_setscene: is %s " % (tid, data

        # sq_deps is the inverse mapping of sq_revdeps (dep -> dependents);
        # sq_revdeps2 is a working copy consumed by scenequeue_updatecounters.
        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        # Tasks with no remaining dependencies are immediately buildable.
        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        self.outrightfail = []
        if self.rq.hashvalidate:
            # Collect the data needed to ask the hash validation function
            # which setscene tasks have usable sstate artefacts.
            sq_hash = []
            sq_hashfn = []
            sq_unihash = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_unihash.append(self.rqdata.runtaskentries[tid].unihash)
                sq_taskname.append(taskname)
                sq_task.append(tid)

            self.cooker.data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", len(stamppresent))

            valid = self.rq.validate_hash(sq_fn=sq_fn, sq_task=sq_taskname, sq_hash=sq_hash, sq_hashfn=sq_hashfn,
                    siginfo=False, sq_unihash=sq_unihash, d=self.cooker.data)

            self.cooker.data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")

            # Anything neither valid, stamped nor noexec will fail outright.
            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

    def scenequeue_updatecounters(self, task, fail = False):
        """
        Remove *task* from the pending counts of its dependents; a dependent
        with no pending setscene dependencies left becomes buildable.  On
        failure, propagate through declared hard dependencies.
        """
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        """
        Under setscene enforcement, a failed setscene task that is not
        whitelisted is fatal: log it and move the queue to cleanup.
        """
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        """Record a successful setscene task and propagate completion."""
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        """Record a failed setscene task; its covered tasks must run normally."""
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        """Mark a setscene task as not runnable at all (e.g. no sstate)."""
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        """Skip a setscene task, treating it as covered/complete."""
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue

        Called repeatedly by the state machine: dispatches at most one
        setscene task per call, then hands control back.
        """

        self.rq.read_workers()

        task = None
        if self.can_start_task():
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # Skip a setscene task entirely when everything depending
                    # on it is already covered and it isn't a build target.
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                # Forced targets must run for real, not via setscene.
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            taskhash = self.rqdata.get_task_hash(task)
            unihash = self.rqdata.get_task_unihash(task)
            # Dispatch to the fakeroot worker when required, otherwise to the
            # normal worker for this multiconfig.
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, taskhash, unihash, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.can_start_task():
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # All setscene tasks processed: publish results and move on to the
        # normal run queue.
        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

    def runqueue_process_waitpid(self, task, status):
        # Delegate to the base class implementation.
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
        """
        Build the dependency data passed to the worker for setscene *task*,
        using the setscene inter-task ([depends]) graph rather than the full
        runqueue dependency graph.
        """
        def getsetscenedeps(tid):
            # Resolve the tid's declared setscene idepends into real tids.
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                unihash = self.rqdata.runtaskentries[revdep].unihash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
2591
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # Store the payload directly as the exception's args.
        # NOTE(review): Exception.args is conventionally a tuple - confirm
        # callers always pass a tuple here.
        self.args = x
2598
2599
class runQueueExitWait(bb.event.Event):
    """
    Event emitted while the runqueue is waiting for its remaining
    active task processes to exit.
    """

    def __init__(self, remain):
        bb.event.Event.__init__(self)
        # Number of task processes still running, plus a human-readable summary
        self.remain = remain
        self.message = "Waiting for {} active tasks to finish".format(remain)
2609
class runQueueEvent(bb.event.Event):
    """
    Base class for all runqueue events; records which task the event
    refers to plus a snapshot of the queue statistics.
    """
    def __init__(self, task, stats, rq):
        bb.event.Event.__init__(self)
        # Identify the task this event concerns
        self.taskid = task
        self.taskstring = task
        self.taskfile = fn_from_tid(task)
        self.taskname = taskname_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Copy so later queue activity cannot mutate this event's stats
        self.stats = stats.copy()
2622
class sceneQueueEvent(runQueueEvent):
    """
    Base class for setscene (sceneQueue) events; like runQueueEvent but
    the task identifiers carry a "_setscene" suffix.
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        # Rewrite the identifiers to name the setscene variant of the task
        suffix = "_setscene"
        self.taskstring = task + suffix
        self.taskname = taskname_from_tid(task) + suffix
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002633
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying that a task was started; records the caller-supplied
    'noexec' flag alongside the usual task details.
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        self.noexec = noexec
2641
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying that a setscene task was started; records the
    caller-supplied 'noexec' flag alongside the usual task details.
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        self.noexec = noexec
2649
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying that a task failed, carrying its exit code.
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Task ({}) failed with exit code '{}'".format(self.taskstring, self.exitcode)
2660
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying that a setscene task failed, carrying its exit code.
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Setscene task ({}) failed with exit code '{}' - real task will be run instead".format(self.taskstring, self.exitcode)
2671
class sceneQueueComplete(sceneQueueEvent):
    """
    Event fired when all the sceneQueue tasks are complete.

    Does not invoke sceneQueueEvent.__init__ - there is no single task
    to record, only the final statistics snapshot.
    """
    def __init__(self, stats, rq):
        bb.event.Event.__init__(self)
        self.stats = stats.copy()
2679
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed; carries no extra payload beyond
    the task details recorded by runQueueEvent.
    """
2684
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed; carries no extra payload
    beyond the task details recorded by sceneQueueEvent.
    """
2689
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying that a task was skipped; 'reason' records why.
    """
    def __init__(self, task, stats, rq, reason):
        super().__init__(task, stats, rq)
        self.reason = reason
2697
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server.

    Workers stream pickled payloads over the pipe framed as
    <event>...</event> and <exitcode>...</exitcode> byte chunks;
    read() incrementally deframes and dispatches them.
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        if pipeout:
            # We only read from this pipe; close the write end if handed one
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = b""  # bytes received but not yet parsed into messages
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        """Switch the executor object that receives task exit notifications."""
        self.rqexec = rqexec

    def read(self):
        """
        Drain pending data from the pipe and dispatch any complete
        messages. Returns True if new data arrived on this call.
        """
        # First check the worker processes are still alive
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            # EAGAIN just means no data is available on the non-blocking fd
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except (ValueError, pickle.UnpicklingError) as e:
                    # pickle.UnpicklingError is not a ValueError subclass;
                    # without it a corrupt payload escaped as an unhandled
                    # exception instead of this diagnostic
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except (ValueError, pickle.UnpicklingError) as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        """Drain any remaining messages and close the read end of the pipe."""
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002761
def get_setscene_enforce_whitelist(d):
    """
    Return the list of 'pn:taskname' patterns permitted to execute when
    BB_SETSCENE_ENFORCE is enabled, or None when enforcement is disabled.

    Patterns of the form '%:taskname' are expanded against each
    non-option target on the command line (sys.argv).

    d: the datastore to read BB_SETSCENE_ENFORCE[_WHITELIST] from.
    """
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    # Iterate the list directly; the original copied it with [:] even
    # though it is never mutated during iteration.
    for item in whitelist:
        if item.startswith('%:'):
            # Expand '%' to the pn of each command-line target
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist
2775
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True if 'pn:taskname' matches one of the fnmatch patterns in
    whitelist, or if whitelist is None (enforcement disabled).
    """
    import fnmatch
    # None means no enforcement: everything is allowed
    if whitelist is None:
        return True
    item = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(item, pattern) for pattern in whitelist)