#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'RunQueue' implementation

Handles preparation and execution of a queue of tasks
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import copy
import os
import sys
import signal
import stat
import fcntl
import errno
import logging
import re
import bb
from bb import msg, data, event
from bb import monitordisk
import subprocess
import pickle
from multiprocessing import Process

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )

def fn_from_tid(tid):
    return tid.rsplit(":", 1)[0]

def taskname_from_tid(tid):
    return tid.rsplit(":", 1)[1]

def split_tid(tid):
    (mc, fn, taskname, _) = split_tid_mcfn(tid)
    return (mc, fn, taskname)

def split_tid_mcfn(tid):
    if tid.startswith('multiconfig:'):
        elems = tid.split(':')
        mc = elems[1]
        fn = ":".join(elems[2:-1])
        taskname = elems[-1]
        mcfn = "multiconfig:" + mc + ":" + fn
    else:
        tid = tid.rsplit(":", 1)
        mc = ""
        fn = tid[0]
        taskname = tid[1]
        mcfn = fn

    return (mc, fn, taskname, mcfn)

def build_tid(mc, fn, taskname):
    if mc:
        return "multiconfig:" + mc + ":" + fn + ":" + taskname
    return fn + ":" + taskname

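# Example of the tid formats handled by the helpers above (the recipe path is
# hypothetical): a plain tid looks like "/path/to/foo.bb:do_compile", while a
# multiconfig tid looks like "multiconfig:musl:/path/to/foo.bb:do_compile".
# On the latter, split_tid_mcfn() returns ("musl", "/path/to/foo.bb",
# "do_compile", "multiconfig:musl:/path/to/foo.bb") and build_tid() reassembles
# the same string from those parts.
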
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        obj = self.__class__(self.total)
        obj.__dict__.update(self.__dict__)
        return obj

    def taskFailed(self):
        self.active = self.active - 1
        self.failed = self.failed + 1

    def taskCompleted(self):
        self.active = self.active - 1
        self.completed = self.completed + 1

    def taskSkipped(self):
        self.active = self.active + 1
        self.skipped = self.skipped + 1

    def taskActive(self):
        self.active = self.active + 1

# These values indicate the next step due to be run in the
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9

class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        self.prio_map = [self.rqdata.runtaskentries.keys()]

        self.buildable = []
        self.skip_maxthread = {}
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None

        # Filter out tasks that have a max number of threads that have been exceeded
        skip_buildable = {}
        for running in self.rq.runq_running.difference(self.rq.runq_complete):
            rtaskname = taskname_from_tid(running)
            if rtaskname not in self.skip_maxthread:
                self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
            if not self.skip_maxthread[rtaskname]:
                continue
            if rtaskname in skip_buildable:
                skip_buildable[rtaskname] += 1
            else:
                skip_buildable[rtaskname] = 1

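        # For example (hypothetical configuration), do_fetch[number_threads] = "2"
        # would cap the scheduler at two do_fetch tasks running at once; the
        # counts gathered above are used below to skip further do_fetch tasks
        # until one of the running ones completes.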
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                return None
            stamp = self.stamps[tid]
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in self.buildable:
            taskname = taskname_from_tid(tid)
            if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
                continue
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        if self.rq.can_start_task():
            return self.next_buildable_task()

    def newbuildable(self, task):
        self.buildable.append(task)

    def describe_task(self, taskid):
        result = 'ID %s' % taskid
        if self.rev_prio_map:
            result = result + (' pri %d' % self.rev_prio_map[taskid])
        return result

    def dump_prio(self, comment):
        bb.debug(3, '%s (most important first):\n%s' %
                 (comment,
                  '\n'.join(['%d. %s' % (index + 1, self.describe_task(taskid)) for
                             index, taskid in enumerate(self.prio_map)])))

class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight;
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        weights = {}
        for tid in self.rqdata.runtaskentries:
            weight = self.rqdata.runtaskentries[tid].weight
            if not weight in weights:
                weights[weight] = []
            weights[weight].append(tid)

        self.prio_map = []
        for weight in sorted(weights):
            for w in weights[weight]:
                self.prio_map.append(w)

        self.prio_map.reverse()

class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible by
    running all tasks related to the same .bb file one after the other.
    This works well where disk space is at a premium and classes like OE's
    rm_work are in force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        super(RunQueueSchedulerCompletion, self).__init__(runqueue, rqdata)

        # Extract list of tasks for each recipe, with tasks sorted
        # ascending from "must run first" (typically do_fetch) to
        # "runs last" (do_build). The speed scheduler prioritizes
        # tasks that must run first before the ones that run later;
        # this is what we depend on here.
        task_lists = {}
        for taskid in self.prio_map:
            fn, taskname = taskid.rsplit(':', 1)
            task_lists.setdefault(fn, []).append(taskname)

        # Now unify the different task lists. The strategy is that
        # common tasks get skipped and new ones get inserted after the
        # preceding common one(s) as they are found. Because task
        # lists should differ only by their number of tasks, but not
        # the ordering of the common tasks, this should result in a
        # deterministic result that is a superset of the individual
        # task ordering.
        all_tasks = []
        for recipe, new_tasks in task_lists.items():
            index = 0
            old_task = all_tasks[index] if index < len(all_tasks) else None
            for new_task in new_tasks:
                if old_task == new_task:
                    # Common task, skip it. This is the fast-path which
                    # avoids a full search.
                    index += 1
                    old_task = all_tasks[index] if index < len(all_tasks) else None
                else:
                    try:
                        index = all_tasks.index(new_task)
                        # Already present, just not at the current
                        # place. We re-synchronized by changing the
                        # index so that it matches again. Now
                        # move on to the next existing task.
                        index += 1
                        old_task = all_tasks[index] if index < len(all_tasks) else None
                    except ValueError:
                        # Not present. Insert before old_task, which
                        # remains the same (but gets shifted back).
                        all_tasks.insert(index, new_task)
                        index += 1
        bb.debug(3, 'merged task list: %s' % all_tasks)

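        # As an illustration of the merge above (hypothetical task lists): the
        # per-recipe lists ["do_fetch", "do_unpack", "do_compile", "do_build"]
        # and ["do_fetch", "do_build"] unify into
        # ["do_fetch", "do_unpack", "do_compile", "do_build"], since a shorter
        # list only omits tasks and never reorders the common ones.
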
        # Now reverse the order so that tasks that finish the work on one
        # recipe are considered more important (= come first). The ordering
        # is now so that do_build is most important.
        all_tasks.reverse()

        # Group tasks of the same kind before tasks of less important
        # kinds at the head of the queue (because earlier = lower
        # priority number = runs earlier), while preserving the
        # ordering by recipe. If recipe foo is more important than
        # bar, then the goal is to work on foo's do_populate_sysroot
        # before bar's do_populate_sysroot and on the more important
        # tasks of foo before any of the less important tasks in any
        # other recipe (if those other recipes are more important than
        # foo).
        #
        # All of this only applies when tasks are runnable. Explicit
        # dependencies still override this ordering by priority.
        #
        # Here's an example of why this priority re-ordering helps with
        # minimizing disk usage. Consider a recipe foo with a higher
        # priority than bar where foo DEPENDS on bar. Then the
        # implicit rule (from base.bbclass) is that foo's do_configure
        # depends on bar's do_populate_sysroot. This ensures that
        # bar's do_populate_sysroot gets done first. Normally the
        # tasks from foo would continue to run once that is done, and
        # bar only gets completed and cleaned up later. By ordering
        # bar's tasks that depend on bar's do_populate_sysroot before foo's
        # do_configure, that problem gets avoided.
        task_index = 0
        self.dump_prio('original priorities')
        for task in all_tasks:
            for index in range(task_index, self.numTasks):
                taskid = self.prio_map[index]
                taskname = taskid.rsplit(':', 1)[1]
                if taskname == task:
                    del self.prio_map[index]
                    self.prio_map.insert(task_index, taskid)
                    task_index += 1
        self.dump_prio('completion priorities')

class RunTaskEntry(object):
    def __init__(self):
        self.depends = set()
        self.revdeps = set()
        self.hash = None
        self.task = None
        self.weight = 1

class RunQueueData:
    """
    BitBake Run Queue implementation
    """
    def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
        self.cooker = cooker
        self.dataCaches = dataCaches
        self.taskData = taskData
        self.targets = targets
        self.rq = rq
        self.warn_multi_bb = False

        self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or ""
        self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split()
        self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
        self.setscenewhitelist_checked = False
        self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1")
        self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()

        self.reset()

    def reset(self):
        self.runtaskentries = {}

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in ids:
            nam = os.path.basename(id)
            nam = re.sub("_[^,]*,", ",", nam)
            ret.extend([nam])
        return ret

    def get_task_hash(self, tid):
        return self.runtaskentries[tid].hash

    def get_user_idstring(self, tid, task_name_suffix = ""):
        return tid + task_name_suffix

    def get_short_user_idstring(self, task, task_name_suffix = ""):
        (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
        pn = self.dataCaches[mc].pkg_fn[taskfn]
        taskname = taskname_from_tid(task) + task_name_suffix
        return "%s:%s" % (pn, taskname)

    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.
        """
        from copy import deepcopy

        valid_chains = []
        explored_deps = {}
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append(" Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    scan = True
                elif revdep in explored_deps[revdep]:
                    scan = True
                else:
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs

    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list, finding tasks that are not
        possible to execute due to circular dependencies.
        """

        numTasks = len(self.runtaskentries)
        weight = {}
        deps_left = {}
        task_done = {}

        for tid in self.runtaskentries:
            task_done[tid] = False
            weight[tid] = 1
            deps_left[tid] = len(self.runtaskentries[tid].revdeps)

        for tid in endpoints:
            weight[tid] = 10
            task_done[tid] = True

        while True:
            next_points = []
            for tid in endpoints:
                for revdep in self.runtaskentries[tid].depends:
                    weight[revdep] = weight[revdep] + weight[tid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break
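
        # Worked example (hypothetical tasks): if endpoint E starts with weight 10
        # and depends on D, which in turn depends on C, the loop above first adds
        # E's weight to D (D = 1 + 10 = 11) and, once all of D's reverse
        # dependencies have been processed, adds D's weight to C (C = 1 + 11 = 12),
        # so tasks near the start of long chains accumulate the largest weights.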

        # Circular dependency sanity check
        problem_tasks = []
        for tid in self.runtaskentries:
            if task_done[tid] is False or deps_left[tid] != 0:
                problem_tasks.append(tid)
                logger.debug(2, "Task %s is not buildable", tid)
                logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
            self.runtaskentries[tid].weight = weight[tid]

        if problem_tasks:
            message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            logger.error(message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            bb.msg.fatal("RunQueue", message)

        return weight

    def prepare(self):
        """
        Turn a set of taskData into a RunQueue and compute data needed
        to optimise the execution order.
        """

        runq_build = {}
        recursivetasks = {}
        recursiveitasks = {}
        recursivetasksselfref = set()

        taskData = self.taskData

        found = False
        for mc in self.taskData:
            if len(taskData[mc].taskentries) > 0:
                found = True
                break
        if not found:
            # Nothing to do
            return 0

        self.init_progress_reporter.start()
        self.init_progress_reporter.next_stage()

        # Step A - Work out a list of tasks to run
        #
        # Taskdata gives us a list of possible providers for every build and run
        # target ordered by priority. It also gives information on each of those
        # providers.
        #
        # To create the actual list of tasks to execute we fix the list of
        # providers and then resolve the dependencies into task IDs. This
        # process is repeated for each type of dependency (tdepends, deptask,
        # rdeptask, recrdeptask, idepends).

        def add_build_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                # Won't be in build_targets if ASSUME_PROVIDED
                if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
                    continue
                depdata = taskData[mc].build_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_runtime_dependencies(depids, tasknames, depends, mc):
            for depname in depids:
                if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
                    continue
                depdata = taskData[mc].run_targets[depname][0]
                if depdata is None:
                    continue
                for taskname in tasknames:
                    t = depdata + ":" + taskname
                    if t in taskData[mc].taskentries:
                        depends.add(t)

        def add_mc_dependencies(mc, tid):
            mcdeps = taskData[mc].get_mcdepends()
            for dep in mcdeps:
                mcdependency = dep.split(':')
                pn = mcdependency[3]
                frommc = mcdependency[1]
                mcdep = mcdependency[2]
                deptask = mcdependency[4]
                if mc == frommc:
                    fn = taskData[mcdep].build_targets[pn][0]
                    newdep = '%s:%s' % (fn,deptask)
                    taskData[mc].taskentries[tid].tdepends.append(newdep)

        for mc in taskData:
            for tid in taskData[mc].taskentries:

                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                #runtid = build_tid(mc, fn, taskname)

                #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)

                depends = set()
                task_deps = self.dataCaches[mc].task_deps[taskfn]

                self.runtaskentries[tid] = RunTaskEntry()

                if fn in taskData[mc].failed_fns:
                    continue

                # We add multiconfig dependencies before processing internal task deps (tdepends)
                if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
                    add_mc_dependencies(mc, tid)

                # Resolve task internal dependencies
                #
                # e.g. addtask before X after Y
                for t in taskData[mc].taskentries[tid].tdepends:
                    (depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
                    depends.add(build_tid(depmc, depfn, deptaskname))

                # Resolve 'deptask' dependencies
                #
                # e.g. do_sometask[deptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS)
                if 'deptask' in task_deps and taskname in task_deps['deptask']:
                    tasknames = task_deps['deptask'][taskname].split()
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)

                # Resolve 'rdeptask' dependencies
                #
                # e.g. do_sometask[rdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all RDEPENDS)
                if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
                    tasknames = task_deps['rdeptask'][taskname].split()
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)

                # Resolve inter-task dependencies
                #
                # e.g. do_sometask[depends] = "targetname:do_someothertask"
                # (makes sure sometask runs after targetname's someothertask)
                idepends = taskData[mc].taskentries[tid].idepends
                for (depname, idependtask) in idepends:
                    if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
                        # Won't be in build_targets if ASSUME_PROVIDED
                        depdata = taskData[mc].build_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
                irdepends = taskData[mc].taskentries[tid].irdepends
                for (depname, idependtask) in irdepends:
                    if depname in taskData[mc].run_targets:
                        # Won't be in run_targets if ASSUME_PROVIDED
                        if not taskData[mc].run_targets[depname]:
                            continue
                        depdata = taskData[mc].run_targets[depname][0]
                        if depdata is not None:
                            t = depdata + ":" + idependtask
                            depends.add(t)
                            if t not in taskData[mc].taskentries:
                                bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))

                # Resolve recursive 'recrdeptask' dependencies (Part A)
                #
                # e.g. do_sometask[recrdeptask] = "do_someothertask"
                # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
                # We cover the recursive part of the dependencies below
                if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
                    tasknames = task_deps['recrdeptask'][taskname].split()
                    recursivetasks[tid] = tasknames
                    add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
                    add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
                    if taskname in tasknames:
                        recursivetasksselfref.add(tid)

                if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
                    recursiveitasks[tid] = []
                    for t in task_deps['recideptask'][taskname].split():
                        newdep = build_tid(mc, fn, t)
                        recursiveitasks[tid].append(newdep)

                self.runtaskentries[tid].depends = depends
                # Remove all self references
                self.runtaskentries[tid].depends.discard(tid)

        #self.dump_data()

        self.init_progress_reporter.next_stage()

        # Resolve recursive 'recrdeptask' dependencies (Part B)
        #
        # e.g. do_sometask[recrdeptask] = "do_someothertask"
        # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
        # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed

        # Generating/iterating recursive lists of dependencies is painful and potentially slow
        # Precompute recursive task dependencies here by:
        # a) create a temp list of reverse dependencies (revdeps)
        # b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
        # c) combine the total list of dependencies in cumulativedeps
        # d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)


        revdeps = {}
        deps = {}
        cumulativedeps = {}
        for tid in self.runtaskentries:
            deps[tid] = set(self.runtaskentries[tid].depends)
            revdeps[tid] = set()
            cumulativedeps[tid] = set()
        # Generate a temp list of reverse dependencies
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                revdeps[dep].add(tid)
        # Find the dependency chain endpoints
        endpoints = set()
        for tid in self.runtaskentries:
            if len(deps[tid]) == 0:
                endpoints.add(tid)
        # Iterate the chains collating dependencies
        while endpoints:
            next = set()
            for tid in endpoints:
                for dep in revdeps[tid]:
                    cumulativedeps[dep].add(fn_from_tid(tid))
                    cumulativedeps[dep].update(cumulativedeps[tid])
                    if tid in deps[dep]:
                        deps[dep].remove(tid)
                    if len(deps[dep]) == 0:
                        next.add(dep)
            endpoints = next
        #for tid in deps:
        #    if len(deps[tid]) != 0:
        #        bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))

        # Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
        # resolve these recursively until we aren't adding any further extra dependencies
        extradeps = True
        while extradeps:
            extradeps = 0
            for tid in recursivetasks:
                tasknames = recursivetasks[tid]

                totaldeps = set(self.runtaskentries[tid].depends)
                if tid in recursiveitasks:
                    totaldeps.update(recursiveitasks[tid])
                    for dep in recursiveitasks[tid]:
                        if dep not in self.runtaskentries:
                            continue
                        totaldeps.update(self.runtaskentries[dep].depends)

                deps = set()
                for dep in totaldeps:
                    if dep in cumulativedeps:
                        deps.update(cumulativedeps[dep])

                for t in deps:
                    for taskname in tasknames:
                        newtid = t + ":" + taskname
                        if newtid == tid:
                            continue
                        if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

                # Handle recursive tasks which depend upon other recursive tasks
                deps = set()
                for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
                    deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
                for newtid in deps:
                    for taskname in tasknames:
                        if not newtid.endswith(":" + taskname):
                            continue
                        if newtid in self.runtaskentries:
                            extradeps += 1
                            self.runtaskentries[tid].depends.add(newtid)

            bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)

        # Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
        for tid in recursivetasksselfref:
            self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)

        self.init_progress_reporter.next_stage()

        #self.dump_data()

        # Step B - Mark all active tasks
        #
        # Start with the tasks we were asked to run and mark all dependencies
        # as active too. If the task is to be 'forced', clear its stamp. Once
        # all active tasks are marked, prune the ones we don't need.

        logger.verbose("Marking Active Tasks")

        def mark_active(tid, depth):
            """
            Mark an item as active along with its depends
            (calls itself recursively)
            """

            if tid in runq_build:
                return

            runq_build[tid] = 1

            depends = self.runtaskentries[tid].depends
            for depend in depends:
                mark_active(depend, depth+1)

        self.target_tids = []
        for (mc, target, task, fn) in self.targets:

            if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
                continue

            if target in taskData[mc].failed_deps:
                continue

            parents = False
            if task.endswith('-'):
                parents = True
                task = task[:-1]

            if fn in taskData[mc].failed_fns:
                continue

            # fn already has mc prefix
            tid = fn + ":" + task
            self.target_tids.append(tid)
            if tid not in taskData[mc].taskentries:
                import difflib
                tasks = []
                for x in taskData[mc].taskentries:
                    if x.startswith(fn + ":"):
                        tasks.append(taskname_from_tid(x))
                close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
                if close_matches:
                    extra = ". Close matches:\n %s" % "\n ".join(close_matches)
                else:
                    extra = ""
                bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))

            # For tasks called "XXXX-", only run their dependencies
            if parents:
                for i in self.runtaskentries[tid].depends:
                    mark_active(i, 1)
            else:
                mark_active(tid, 1)

        self.init_progress_reporter.next_stage()

        # Step C - Prune all inactive tasks
        #
        # Once all active tasks are marked, prune the ones we don't need.

        delcount = {}
        for tid in list(self.runtaskentries.keys()):
            if tid not in runq_build:
                delcount[tid] = self.runtaskentries[tid]
                del self.runtaskentries[tid]

        # Handle --runall
        if self.cooker.configuration.runall:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runall:
                runall_tids = set()
                for tid in list(self.runtaskentries):
                    wanttid = fn_from_tid(tid) + ":do_%s" % task
                    if wanttid in delcount:
                        self.runtaskentries[wanttid] = delcount[wanttid]
                    if wanttid in self.runtaskentries:
                        runall_tids.add(wanttid)

                for tid in list(runall_tids):
                    mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))

        self.init_progress_reporter.next_stage()

        # Handle runonly
        if self.cooker.configuration.runonly:
            # re-run the mark_active and then drop unused tasks from new list
            runq_build = {}

            for task in self.cooker.configuration.runonly:
                runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }

                for tid in list(runonly_tids):
                    mark_active(tid,1)

            for tid in list(self.runtaskentries.keys()):
                if tid not in runq_build:
                    delcount[tid] = self.runtaskentries[tid]
                    del self.runtaskentries[tid]

            if len(self.runtaskentries) == 0:
                bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))

        #
        # Step D - Sanity checks and computation
        #

        # Check to make sure we still have tasks to run
        if len(self.runtaskentries) == 0:
            if not taskData[''].abort:
                bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
            else:
                bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")

        logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))

        logger.verbose("Assign Weightings")

        self.init_progress_reporter.next_stage()

        # Generate a list of reverse dependencies to ease future calculations
        for tid in self.runtaskentries:
            for dep in self.runtaskentries[tid].depends:
                self.runtaskentries[dep].revdeps.add(tid)

        self.init_progress_reporter.next_stage()

        # Identify tasks at the end of dependency chains
        # Error on circular dependency loops (length two)
        endpoints = []
        for tid in self.runtaskentries:
            revdeps = self.runtaskentries[tid].revdeps
            if len(revdeps) == 0:
                endpoints.append(tid)
            for dep in revdeps:
                if dep in self.runtaskentries[tid].depends:
                    bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))


        logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))

        self.init_progress_reporter.next_stage()

        # Calculate task weights
        # Check for higher length circular dependencies
        self.runq_weight = self.calculate_task_weights(endpoints)

        self.init_progress_reporter.next_stage()

        # Sanity Check - Check for multiple tasks building the same provider
        for mc in self.dataCaches:
            prov_list = {}
            seen_fn = []
            for tid in self.runtaskentries:
                (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                if taskfn in seen_fn:
                    continue
                if mc != tidmc:
                    continue
                seen_fn.append(taskfn)
                for prov in self.dataCaches[mc].fn_provides[taskfn]:
                    if prov not in prov_list:
                        prov_list[prov] = [taskfn]
                    elif taskfn not in prov_list[prov]:
                        prov_list[prov].append(taskfn)
            for prov in prov_list:
                if len(prov_list[prov]) < 2:
                    continue
                if prov in self.multi_provider_whitelist:
                    continue
                seen_pn = []
                # If two versions of the same PN are being built it's fatal, we don't support it.
                for fn in prov_list[prov]:
                    pn = self.dataCaches[mc].pkg_fn[fn]
                    if pn not in seen_pn:
                        seen_pn.append(pn)
                    else:
                        bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
                msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
                #
                # Construct a list of things which uniquely depend on each provider
                # since this may help the user figure out which dependency is triggering this warning
                #
                msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
                deplist = {}
                commondeps = None
                for provfn in prov_list[prov]:
                    deps = set()
                    for tid in self.runtaskentries:
                        fn = fn_from_tid(tid)
                        if fn != provfn:
                            continue
                        for dep in self.runtaskentries[tid].revdeps:
                            fn = fn_from_tid(dep)
                            if fn == provfn:
                                continue
                            deps.add(dep)
                    if not commondeps:
                        commondeps = set(deps)
                    else:
                        commondeps &= deps
                    deplist[provfn] = deps
                for provfn in deplist:
                    msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
                #
                # Construct a list of provides and runtime providers for each recipe
                # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
                #
                msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
                provide_results = {}
                rprovide_results = {}
                commonprovs = None
                commonrprovs = None
                for provfn in prov_list[prov]:
                    provides = set(self.dataCaches[mc].fn_provides[provfn])
                    rprovides = set()
                    for rprovide in self.dataCaches[mc].rproviders:
                        if provfn in self.dataCaches[mc].rproviders[rprovide]:
                            rprovides.add(rprovide)
                    for package in self.dataCaches[mc].packages:
                        if provfn in self.dataCaches[mc].packages[package]:
                            rprovides.add(package)
                    for package in self.dataCaches[mc].packages_dynamic:
                        if provfn in self.dataCaches[mc].packages_dynamic[package]:
                            rprovides.add(package)
                    if not commonprovs:
                        commonprovs = set(provides)
                    else:
                        commonprovs &= provides
                    provide_results[provfn] = provides
                    if not commonrprovs:
                        commonrprovs = set(rprovides)
                    else:
                        commonrprovs &= rprovides
                    rprovide_results[provfn] = rprovides
                #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
                #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
                for provfn in prov_list[prov]:
                    msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
                    msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))

                if self.warn_multi_bb:
                    logger.verbnote(msg)
                else:
                    logger.error(msg)

        self.init_progress_reporter.next_stage()

        # Create a whitelist usable by the stamp checks
        self.stampfnwhitelist = {}
        for mc in self.taskData:
            self.stampfnwhitelist[mc] = []
            for entry in self.stampwhitelist.split():
                if entry not in self.taskData[mc].build_targets:
                    continue
                fn = self.taskData[mc].build_targets[entry][0]
                self.stampfnwhitelist[mc].append(fn)

        self.init_progress_reporter.next_stage()

        # Iterate over the task list looking for tasks with a 'setscene' function
        self.runq_setscene_tids = []
        if not self.cooker.configuration.nosetscene:
            for tid in self.runtaskentries:
                (mc, fn, taskname, _) = split_tid_mcfn(tid)
                setscenetid = tid + "_setscene"
                if setscenetid not in taskData[mc].taskentries:
                    continue
                self.runq_setscene_tids.append(tid)

        def invalidate_task(tid, error_nostamp):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.dataCaches[mc].task_deps[taskfn]
            if fn + ":" + taskname not in taskData[mc].taskentries:
                logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
            if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
                if error_nostamp:
                    bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
                else:
                    bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
            else:
                logger.verbose("Invalidate task %s, %s", taskname, fn)
                bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)

        self.init_progress_reporter.next_stage()

        # Invalidate task if force mode active
        if self.cooker.configuration.force:
            for tid in self.target_tids:
                invalidate_task(tid, False)

        # Invalidate task if invalidate mode active
        if self.cooker.configuration.invalidate_stamp:
            for tid in self.target_tids:
                fn = fn_from_tid(tid)
                for st in self.cooker.configuration.invalidate_stamp.split(','):
                    if not st.startswith("do_"):
                        st = "do_%s" % st
                    invalidate_task(fn + ":" + st, True)

        self.init_progress_reporter.next_stage()

        # Create and print to the logs a virtual/xxxx -> PN (fn) table
        for mc in taskData:
            virtmap = taskData[mc].get_providermap(prefix="virtual/")
            virtpnmap = {}
            for v in virtmap:
                virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
                bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
            if hasattr(bb.parse.siggen, "tasks_resolved"):
                bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])

        self.init_progress_reporter.next_stage()

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(self.runtaskentries)
        while len(todeal) > 0:
            for tid in todeal.copy():
                if len(self.runtaskentries[tid].depends - dealtwith) == 0:
                    dealtwith.add(tid)
                    todeal.remove(tid)
                    procdep = []
                    for dep in self.runtaskentries[tid].depends:
                        procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
                    (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                    self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
                    task = self.runtaskentries[tid].task

Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001160 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001161
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001162 #self.dump_data()
1163 return len(self.runtaskentries)
1164
1165 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001166 """
1167 Dump some debug information on the internal data structures
1168 """
1169 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001170 for tid in self.runtaskentries:
1171 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
1172 self.runtaskentries[tid].weight,
1173 self.runtaskentries[tid].depends,
1174 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001175
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001176class RunQueueWorker():
1177 def __init__(self, process, pipe):
1178 self.process = process
1179 self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001180
1181class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001182 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001183
1184 self.cooker = cooker
1185 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001186 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001187
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001188 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY") or "perfile"
1189 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION") or None
1190 self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2") or None
1191 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID") or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001192
1193 self.state = runQueuePrepare
1194
1195 # For disk space monitor
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001196 # Invoked at regular time intervals via the bitbake heartbeat event
1197 # while the build is running. We generate a unique name for the handler
1198 # here, just in case that there ever is more than one RunQueue instance,
1199 # start the handler when reaching runQueueSceneRun, and stop it when
1200 # done with the build.
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001201 self.dm = monitordisk.diskMonitor(cfgData)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001202 self.dm_event_handler_name = '_bb_diskmonitor_' + str(id(self))
1203 self.dm_event_handler_registered = False
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001204 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001205 self.worker = {}
1206 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001207
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001208 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001209 logger.debug(1, "Starting bitbake-worker")
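        # The "magic" token is passed to bitbake-worker on its command line as a
        # simple handshake. It is extended below when profiling is enabled and
        # when the worker runs under fakeroot, presumably so the worker end can
        # tell the variants apart.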
1210 magic = "decafbad"
1211 if self.cooker.configuration.profile:
1212 magic = "decafbadbad"
1213 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001214 magic = magic + "beef"
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001215 mcdata = self.cooker.databuilder.mcdata[mc]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001216 fakerootcmd = mcdata.getVar("FAKEROOTCMD")
1217 fakerootenv = (mcdata.getVar("FAKEROOTBASEENV") or "").split()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001218 env = os.environ.copy()
1219 for key, value in (var.split('=', 1) for var in fakerootenv):
1220 env[key] = value
1221 worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
1222 else:
1223 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1224 bb.utils.nonblockingfd(worker.stdout)
1225 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1226
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001227 runqhash = {}
1228 for tid in self.rqdata.runtaskentries:
1229 runqhash[tid] = self.rqdata.runtaskentries[tid].hash
1230
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001231 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001232 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1233 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1234 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1235 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001236 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001237 "runq_hash" : runqhash,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001238 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1239 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1240 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1241 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1242 "prhost" : self.cooker.prhost,
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001243 "buildname" : self.cfgData.getVar("BUILDNAME"),
1244 "date" : self.cfgData.getVar("DATE"),
1245 "time" : self.cfgData.getVar("TIME"),
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001246 }
1247
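        # The worker protocol is a byte stream of pickled payloads framed by
        # pseudo-XML tags, e.g. b"<workerdata>" + pickle.dumps(...) + b"</workerdata>",
        # written to the worker's stdin below.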
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001248 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001249 worker.stdin.write(b"<extraconfigdata>" + pickle.dumps(self.cooker.extraconfigdata) + b"</extraconfigdata>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001250 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001251 worker.stdin.flush()
1252
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001253 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001254
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001255 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001256 if not worker:
1257 return
1258 logger.debug(1, "Teardown for bitbake-worker")
1259 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001260 worker.process.stdin.write(b"<quit></quit>")
1261 worker.process.stdin.flush()
1262 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001263 except IOError:
1264 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001265 while worker.process.returncode is None:
1266 worker.pipe.read()
1267 worker.process.poll()
1268 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001269 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001270 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001271
1272 def start_worker(self):
1273 if self.worker:
1274 self.teardown_workers()
1275 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001276 for mc in self.rqdata.dataCaches:
1277 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001278
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001279 def start_fakeworker(self, rqexec, mc):
1280 if mc not in self.fakeworker:
1281 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001282
1283 def teardown_workers(self):
1284 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001285 for mc in self.worker:
1286 self._teardown_worker(self.worker[mc])
1287 self.worker = {}
1288 for mc in self.fakeworker:
1289 self._teardown_worker(self.fakeworker[mc])
1290 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001291
1292 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001293 for mc in self.worker:
1294 self.worker[mc].pipe.read()
1295 for mc in self.fakeworker:
1296 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001297
1298 def active_fds(self):
1299 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001300 for mc in self.worker:
1301 fds.append(self.worker[mc].pipe.input)
1302 for mc in self.fakeworker:
1303 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001304 return fds
1305
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001306 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
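        """
        Check whether the stamp for a task (and, when recurse=True, the stamps
        of its dependencies) is current, i.e. whether the task can be skipped.
        """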
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001307 def get_timestamp(f):
1308 try:
1309 if not os.access(f, os.F_OK):
1310 return None
1311 return os.stat(f)[stat.ST_MTIME]
1312 except:
1313 return None
1314
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001315 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1316 if taskname is None:
1317 taskname = tn
1318
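        # BB_STAMP_POLICY controls how far stamp comparison reaches: "perfile"
        # (the default) only compares stamps belonging to the same recipe file,
        # any other value considers the full dependency tree, and "whitelist"
        # additionally exempts files on the stamp whitelist.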
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001319 if self.stamppolicy == "perfile":
1320 fulldeptree = False
1321 else:
1322 fulldeptree = True
1323 stampwhitelist = []
1324 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001325 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001326
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001327 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001328
1329 # If the stamp is missing, it's not current
1330 if not os.access(stampfile, os.F_OK):
1331 logger.debug(2, "Stampfile %s not available", stampfile)
1332 return False
1333 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001334 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001335 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1336 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1337 return False
1338
1339 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1340 return True
1341
1342 if cache is None:
1343 cache = {}
1344
1345 iscurrent = True
1346 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001347 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001348 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001349 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1350 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1351 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001352 t2 = get_timestamp(stampfile2)
1353 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001354 if t3 and not t2:
1355 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001356 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001357 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001358 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1359 if not t2:
1360 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1361 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001362 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001363 if t1 < t2:
1364 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1365 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001366 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001367 if recurse and iscurrent:
1368 if dep in cache:
1369 iscurrent = cache[dep]
1370 if not iscurrent:
1371 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)', fn2, taskname2)
1372 else:
1373 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1374 cache[dep] = iscurrent
1375 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001376 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001377 return iscurrent
1378
1379 def _execute_runqueue(self):
1380 """
1381 Run the tasks in a queue prepared by rqdata.prepare()
1382 Upon failure, optionally try to recover the build using any alternate providers
1383 (if the abort on failure configuration option isn't set)
1384 """
1385
1386 retval = True
1387
1388 if self.state is runQueuePrepare:
1389 self.rqexe = RunQueueExecuteDummy(self)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001390 # NOTE: if you add, remove or significantly refactor the stages of this
1391 # process then you should recalculate the weightings here. This is quite
1392 # easy to do - just change the next line temporarily to pass debug=True as
1393 # the last parameter and you'll get a printout of the weightings as well
1394 # as a map to the lines where next_stage() was called. Of course this isn't
1395 # critical, but it helps to keep the progress reporting accurate.
1396 self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
1397 "Initialising tasks",
1398 [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
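            # A hypothetical sketch of the debug variant mentioned above (here
            # 'weights' stands in for the list just given):
            #   self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(
            #       self.cooker.data, "Initialising tasks", weights, debug=True)
            # which prints the measured weightings so the list can be refreshed.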
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001399 if self.rqdata.prepare() == 0:
1400 self.state = runQueueComplete
1401 else:
1402 self.state = runQueueSceneInit
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001403 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001404
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001405 # We are ready to run; emit dependency info to any UI or class which
1406 # needs it.
1407 depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
1408 self.rqdata.init_progress_reporter.next_stage()
1409 bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001410
1411 if self.state is runQueueSceneInit:
Brad Bishope2d5b612018-11-23 10:55:50 +13001412 if not self.dm_event_handler_registered:
1413 res = bb.event.register(self.dm_event_handler_name,
1414 lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
1415 ('bb.event.HeartbeatEvent',))
1416 self.dm_event_handler_registered = True
1417
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001418 dump = self.cooker.configuration.dump_signatures
1419 if dump:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001420 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001421 if 'printdiff' in dump:
1422 invalidtasks = self.print_diffscenetasks()
1423 self.dump_signatures(dump)
1424 if 'printdiff' in dump:
1425 self.write_diffscenetasks(invalidtasks)
1426 self.state = runQueueComplete
1427 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001428 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001429 self.start_worker()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001430 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001431 self.rqexe = RunQueueExecuteScenequeue(self)
1432
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001433 if self.state is runQueueSceneRun:
1434 retval = self.rqexe.execute()
1435
1436 if self.state is runQueueRunInit:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001437 if self.cooker.configuration.setsceneonly:
1438 self.state = runQueueComplete
1439 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001440 # Just in case we didn't run the setscene phase
1441 self.rqdata.init_progress_reporter.finish()
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001442 logger.info("Executing RunQueue Tasks")
1443 self.rqexe = RunQueueExecuteTasks(self)
1444 self.state = runQueueRunning
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001445
1446 if self.state is runQueueRunning:
1447 retval = self.rqexe.execute()
1448
1449 if self.state is runQueueCleanUp:
1450 retval = self.rqexe.finish()
1451
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001452 build_done = self.state is runQueueComplete or self.state is runQueueFailed
1453
1454 if build_done and self.dm_event_handler_registered:
1455 bb.event.remove(self.dm_event_handler_name, None)
1456 self.dm_event_handler_registered = False
1457
1458 if build_done and self.rqexe:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001459 self.teardown_workers()
1460 if self.rqexe.stats.failed:
1461 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
1462 else:
1463 # Let's avoid the word "failed" if nothing actually did
1464 logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)
1465
1466 if self.state is runQueueFailed:
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001467 raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001468
1469 if self.state is runQueueComplete:
1470 # All done
1471 return False
1472
1473 # Loop
1474 return retval
1475
1476 def execute_runqueue(self):
1477 # Catch unexpected exceptions and ensure we exit when an error occurs rather than looping.
1478 try:
1479 return self._execute_runqueue()
1480 except bb.runqueue.TaskFailure:
1481 raise
1482 except SystemExit:
1483 raise
1484 except bb.BBHandledException:
1485 try:
1486 self.teardown_workers()
1487 except:
1488 pass
1489 self.state = runQueueComplete
1490 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001491 except Exception as err:
1492 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001493 try:
1494 self.teardown_workers()
1495 except:
1496 pass
1497 self.state = runQueueComplete
1498 raise
1499
1500 def finish_runqueue(self, now = False):
1501 if not self.rqexe:
1502 self.state = runQueueComplete
1503 return
1504
1505 if now:
1506 self.rqexe.finish_now()
1507 else:
1508 self.rqexe.finish()
1509
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001510 def rq_dump_sigfn(self, fn, options):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001511 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001512 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1513 siggen = bb.parse.siggen
1514 dataCaches = self.rqdata.dataCaches
1515 siggen.dump_sigfn(fn, dataCaches, options)
1516
1517 def dump_signatures(self, options):
1518 fns = set()
1519 bb.note("Reparsing files to collect dependency data")
1520
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001521 for tid in self.rqdata.runtaskentries:
1522 fn = fn_from_tid(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001523 fns.add(fn)
1524
1525 max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
1526 # We cannot use the real multiprocessing.Pool easily due to some local data
1527 # that can't be pickled. This is a cheap multi-process solution.
1528 launched = []
1529 while fns:
1530 if len(launched) < max_process:
1531 p = Process(target=self.rq_dump_sigfn, args=(fns.pop(), options))
1532 p.start()
1533 launched.append(p)
1534 for q in launched:
1535 # The finished processes are joined when calling is_alive()
1536 if not q.is_alive():
1537 launched.remove(q)
1538 for p in launched:
1539 p.join()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001540
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001541 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001542
1543 return
1544
1545 def print_diffscenetasks(self):
1546
1547 valid = []
1548 sq_hash = []
1549 sq_hashfn = []
1550 sq_fn = []
1551 sq_taskname = []
1552 sq_task = []
1553 noexec = []
1554 stamppresent = []
1555 valid_new = set()
1556
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001557 for tid in self.rqdata.runtaskentries:
1558 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1559 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001560
1561 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001562 noexec.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001563 continue
1564
1565 sq_fn.append(fn)
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001566 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001567 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001568 sq_taskname.append(taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001569 sq_task.append(tid)
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001570 locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
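        # The BB_HASHCHECK_FUNCTION named in self.hashvalidate is evaluated with
        # the parallel lists built above and is expected to return the indices
        # into sq_task whose setscene data is available (hence sq_task[v] below).
        # Roughly, such a function looks like (hypothetical names):
        #   def my_hashcheck(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
        #       return [i for i, h in enumerate(sq_hash) if sstate_has(h)]
        # where sstate_has() is a stand-in for whatever availability test is used.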
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001571 try:
1572 call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
1573 valid = bb.utils.better_eval(call, locs)
1574 # Handle version with no siginfo parameter
1575 except TypeError:
1576 call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
1577 valid = bb.utils.better_eval(call, locs)
1578 for v in valid:
1579 valid_new.add(sq_task[v])
1580
1581 # Tasks which are both setscene and noexec never care about dependencies
1582 # We therefore find tasks which are setscene and noexec and mark their
1583 # unique dependencies as valid.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001584 for tid in noexec:
1585 if tid not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001586 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001587 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001588 hasnoexecparents = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001589 for dep2 in self.rqdata.runtaskentries[dep].revdeps:
1590 if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001591 continue
1592 hasnoexecparents = False
1593 break
1594 if hasnoexecparents:
1595 valid_new.add(dep)
1596
1597 invalidtasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001598 for tid in self.rqdata.runtaskentries:
1599 if tid not in valid_new and tid not in noexec:
1600 invalidtasks.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001601
1602 found = set()
1603 processed = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001604 for tid in invalidtasks:
1605 toprocess = set([tid])
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001606 while toprocess:
1607 next = set()
1608 for t in toprocess:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001609 for dep in self.rqdata.runtaskentries[t].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001610 if dep in invalidtasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001611 found.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001612 if dep not in processed:
1613 processed.add(dep)
1614 next.add(dep)
1615 toprocess = next
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001616 if tid in found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001617 toprocess = set()
1618
1619 tasklist = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001620 for tid in invalidtasks.difference(found):
1621 tasklist.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001622
1623 if tasklist:
1624 bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))
1625
1626 return invalidtasks.difference(found)
1627
1628 def write_diffscenetasks(self, invalidtasks):
1629
1630 # Define recursion callback
1631 def recursecb(key, hash1, hash2):
1632 hashes = [hash1, hash2]
1633 hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)
1634
1635 recout = []
1636 if len(hashfiles) == 2:
1637 out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
1638 recout.extend(list(' ' + l for l in out2))
1639 else:
1640 recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
1641
1642 return recout
1643
1644
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001645 for tid in invalidtasks:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001646 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1647 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001648 h = self.rqdata.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001649 matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
1650 match = None
1651 for m in matches:
1652 if h in m:
1653 match = m
1654 if match is None:
1655 bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001656 matches = {k : v for k, v in iter(matches.items()) if h not in k}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001657 if matches:
1658 latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
1659 prevh = __find_md5__.search(latestmatch).group(0)
1660 output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
1661 bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1662
1663class RunQueueExecute:
1664
1665 def __init__(self, rq):
1666 self.rq = rq
1667 self.cooker = rq.cooker
1668 self.cfgData = rq.cfgData
1669 self.rqdata = rq.rqdata
1670
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001671 self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
1672 self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001673
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001674 self.runq_buildable = set()
1675 self.runq_running = set()
1676 self.runq_complete = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001677
1678 self.build_stamps = {}
1679 self.build_stamps2 = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001680 self.failed_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001681
1682 self.stampcache = {}
1683
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001684 for mc in rq.worker:
1685 rq.worker[mc].pipe.setrunqueueexec(self)
1686 for mc in rq.fakeworker:
1687 rq.fakeworker[mc].pipe.setrunqueueexec(self)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001688
1689 if self.number_tasks <= 0:
1690 bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
1691
1692 def runqueue_process_waitpid(self, task, status):
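        """
        Called when a worker reports that a task exited; drop its stamp
        bookkeeping and mark the task as completed or failed.
        """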
1693
1694 # self.build_stamps[pid] may not exist when use shared work directory.
1695 if task in self.build_stamps:
1696 self.build_stamps2.remove(self.build_stamps[task])
1697 del self.build_stamps[task]
1698
1699 if status != 0:
1700 self.task_fail(task, status)
1701 else:
1702 self.task_complete(task)
1703 return True
1704
1705 def finish_now(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001706 for mc in self.rq.worker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001707 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001708 self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
1709 self.rq.worker[mc].process.stdin.flush()
1710 except IOError:
1711 # worker must have died?
1712 pass
1713 for mc in self.rq.fakeworker:
1714 try:
1715 self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
1716 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001717 except IOError:
1718 # worker must have died?
1719 pass
1720
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001721 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001722 self.rq.state = runQueueFailed
1723 return
1724
1725 self.rq.state = runQueueComplete
1726 return
1727
1728 def finish(self):
1729 self.rq.state = runQueueCleanUp
1730
1731 if self.stats.active > 0:
1732 bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
1733 self.rq.read_workers()
1734 return self.rq.active_fds()
1735
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001736 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001737 self.rq.state = runQueueFailed
1738 return True
1739
1740 self.rq.state = runQueueComplete
1741 return True
1742
1743 def check_dependencies(self, task, taskdeps, setscene = False):
1744 if not self.rq.depvalidate:
1745 return False
1746
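        # The BB_SETSCENE_DEPVALID function is evaluated as
        #   <func>(task, taskdata, notneeded, d)
        # where taskdata maps each dependency tid to [pn, taskname, fn] (built
        # below); its result is returned to the caller unchanged.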
1747 taskdata = {}
1748 taskdeps.add(task)
1749 for dep in taskdeps:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001750 (mc, fn, taskname, taskfn) = split_tid_mcfn(dep)
1751 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001752 taskdata[dep] = [pn, taskname, fn]
1753 call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001754 locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.data }
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001755 valid = bb.utils.better_eval(call, locs)
1756 return valid
1757
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001758 def can_start_task(self):
1759 can_start = self.stats.active < self.number_tasks
1760 return can_start
1761
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001762class RunQueueExecuteDummy(RunQueueExecute):
1763 def __init__(self, rq):
1764 self.rq = rq
1765 self.stats = RunQueueStats(0)
1766
1767 def finish(self):
1768 self.rq.state = runQueueComplete
1769 return
1770
1771class RunQueueExecuteTasks(RunQueueExecute):
1772 def __init__(self, rq):
1773 RunQueueExecute.__init__(self, rq)
1774
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001775 self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001776
1777 self.stampcache = {}
1778
1779 initial_covered = self.rq.scenequeue_covered.copy()
1780
1781 # Mark initial buildable tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001782 for tid in self.rqdata.runtaskentries:
1783 if len(self.rqdata.runtaskentries[tid].depends) == 0:
1784 self.runq_buildable.add(tid)
1785 if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
1786 self.rq.scenequeue_covered.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001787
1788 found = True
1789 while found:
1790 found = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001791 for tid in self.rqdata.runtaskentries:
1792 if tid in self.rq.scenequeue_covered:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001793 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001794 logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001795
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001796 if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
1797 if tid in self.rq.scenequeue_notcovered:
1798 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001799 found = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001800 self.rq.scenequeue_covered.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001801
1802 logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))
1803
1804 # Allow the metadata to elect for setscene tasks to run anyway
1805 covered_remove = set()
1806 if self.rq.setsceneverify:
1807 invalidtasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001808 tasknames = {}
1809 fns = {}
1810 for tid in self.rqdata.runtaskentries:
1811 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1812 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
1813 fns[tid] = taskfn
1814 tasknames[tid] = taskname
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001815 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1816 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001817 if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
1818 logger.debug(2, 'Setscene stamp current for task %s', tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001819 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001820 if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
1821 logger.debug(2, 'Normal stamp current for task %s', tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001822 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001823 invalidtasks.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001824
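            # BB_SETSCENE_VERIFY_FUNCTION2 is evaluated with the signature built
            # below and is expected to return the set of covered tids that should
            # be removed from the skip list (consumed as covered_remove).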
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001825 call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001826 locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.data, "invalidtasks" : invalidtasks }
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001827 covered_remove = bb.utils.better_eval(call, locs)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001828
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001829 def removecoveredtask(tid):
1830 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
1831 taskname = taskname + '_setscene'
1832 bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
1833 self.rq.scenequeue_covered.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001834
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001835 toremove = covered_remove | self.rq.scenequeue_notcovered
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001836 for task in toremove:
1837 logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
1838 while toremove:
1839 covered_remove = []
1840 for task in toremove:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001841 if task in self.rq.scenequeue_covered:
1842 removecoveredtask(task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001843 for deptask in self.rqdata.runtaskentries[task].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001844 if deptask not in self.rq.scenequeue_covered:
1845 continue
1846 if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
1847 continue
1848 logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
1849 covered_remove.append(deptask)
1850 toremove = covered_remove
1851
1852 logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)
1853
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001854
1855 for mc in self.rqdata.dataCaches:
1856 target_pairs = []
1857 for tid in self.rqdata.target_tids:
1858 (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
1859 if tidmc == mc:
1860 target_pairs.append((fn, taskname))
1861
1862 event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001863
1864 schedulers = self.get_schedulers()
1865 for scheduler in schedulers:
1866 if self.scheduler == scheduler.name:
1867 self.sched = scheduler(self, self.rqdata)
1868 logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
1869 break
1870 else:
1871 bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
1872 (self.scheduler, ", ".join(obj.name for obj in schedulers)))
1873
1874 def get_schedulers(self):
1875 schedulers = set(obj for obj in globals().values()
1876 if type(obj) is type and
1877 issubclass(obj, RunQueueScheduler))
1878
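        # BB_SCHEDULERS may list extra scheduler classes as dotted names, e.g.
        # (hypothetical layer/module names):
        #   BB_SCHEDULERS = "mylayer.sched.MyScheduler"
        # Each entry is imported below and added to the set of RunQueueScheduler
        # subclasses discovered in this module's globals.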
Brad Bishop6e60e8b2018-02-01 10:27:11 -05001879 user_schedulers = self.cfgData.getVar("BB_SCHEDULERS")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001880 if user_schedulers:
1881 for sched in user_schedulers.split():
1882 if "." not in sched:
1883 bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
1884 continue
1885
1886 modname, name = sched.rsplit(".", 1)
1887 try:
1888 module = __import__(modname, fromlist=(name,))
1889 except ImportError as exc:
1890 logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
1891 raise SystemExit(1)
1892 else:
1893 schedulers.add(getattr(module, name))
1894 return schedulers
1895
1896 def setbuildable(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001897 self.runq_buildable.add(task)
Brad Bishop316dfdd2018-06-25 12:45:53 -04001898 self.sched.newbuildable(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001899
1900 def task_completeoutright(self, task):
1901 """
1902 Mark a task as completed
1903 Look at the reverse dependencies and mark any task with
1904 completed dependencies as buildable
1905 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001906 self.runq_complete.add(task)
1907 for revdep in self.rqdata.runtaskentries[task].revdeps:
1908 if revdep in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001909 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001910 if revdep in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001911 continue
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001912 alldeps = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001913 for dep in self.rqdata.runtaskentries[revdep].depends:
1914 if dep not in self.runq_complete:
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001915 alldeps = False
1916 break
1917 if alldeps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001918 self.setbuildable(revdep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001919 logger.debug(1, "Marking task %s as buildable", revdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001920
1921 def task_complete(self, task):
1922 self.stats.taskCompleted()
1923 bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
1924 self.task_completeoutright(task)
1925
1926 def task_fail(self, task, exitcode):
1927 """
1928 Called when a task has failed
1929 Updates the state engine with the failure
1930 """
1931 self.stats.taskFailed()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001932 self.failed_tids.append(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001933 bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001934 if self.rqdata.taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001935 self.rq.state = runQueueCleanUp
1936
1937 def task_skip(self, task, reason):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001938 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001939 self.setbuildable(task)
1940 bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
1941 self.task_completeoutright(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001942 self.stats.taskSkipped()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08001943 self.stats.taskCompleted()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001944
1945 def execute(self):
1946 """
1947 Run the tasks in a queue prepared by rqdata.prepare()
1948 """
1949
Brad Bishopd7bf8c12018-02-25 22:55:05 -05001950 if self.rqdata.setscenewhitelist is not None and not self.rqdata.setscenewhitelist_checked:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001951 self.rqdata.setscenewhitelist_checked = True
1952
1953 # Check tasks that are going to run against the whitelist
1954 def check_norun_task(tid, showerror=False):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001955 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001956 # Ignore covered tasks
1957 if tid in self.rq.scenequeue_covered:
1958 return False
1959 # Ignore stamped tasks
1960 if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
1961 return False
1962 # Ignore noexec tasks
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001963 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001964 if 'noexec' in taskdep and taskname in taskdep['noexec']:
1965 return False
1966
Brad Bishop37a0e4d2017-12-04 01:01:44 -05001967 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001968 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
1969 if showerror:
1970 if tid in self.rqdata.runq_setscene_tids:
1971 logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
1972 else:
1973 logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
1974 return True
1975 return False
1976 # Look to see if any tasks that we think shouldn't run are in fact going to run
1977 unexpected = False
1978 for tid in self.rqdata.runtaskentries:
1979 if check_norun_task(tid):
1980 unexpected = True
1981 break
1982 if unexpected:
1983 # Run through the tasks in the rough order they'd have executed and print errors
1984 # (since the order can be useful - usually missing sstate for the last few tasks
1985 # is the cause of the problem)
1986 task = self.sched.next()
1987 while task is not None:
1988 check_norun_task(task, showerror=True)
1989 self.task_skip(task, 'Setscene enforcement check')
1990 task = self.sched.next()
1991
1992 self.rq.state = runQueueCleanUp
1993 return True
1994
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001995 self.rq.read_workers()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001996
1997 if self.stats.total == 0:
1998 # nothing to do
1999 self.rq.state = runQueueCleanUp
2000
2001 task = self.sched.next()
2002 if task is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002003 (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002004
2005 if task in self.rq.scenequeue_covered:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002006 logger.debug(2, "Setscene covered task %s", task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002007 self.task_skip(task, "covered")
2008 return True
2009
2010 if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002011 logger.debug(2, "Stamp current task %s", task)
2012
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002013 self.task_skip(task, "existing")
2014 return True
2015
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002016 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002017 if 'noexec' in taskdep and taskname in taskdep['noexec']:
2018 startevent = runQueueTaskStarted(task, self.stats, self.rq,
2019 noexec=True)
2020 bb.event.fire(startevent, self.cfgData)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002021 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002022 self.stats.taskActive()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002023 if not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002024 bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002025 self.task_complete(task)
2026 return True
2027 else:
2028 startevent = runQueueTaskStarted(task, self.stats, self.rq)
2029 bb.event.fire(startevent, self.cfgData)
2030
2031 taskdepdata = self.build_taskdepdata(task)
2032
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002033 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002034 if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not (self.cooker.configuration.dry_run or self.rqdata.setscene_enforce):
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002035 if mc not in self.rq.fakeworker:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002036 try:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002037 self.rq.start_fakeworker(self, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002038 except OSError as exc:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002039 logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002040 self.rq.state = runQueueFailed
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002041 self.stats.taskFailed()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002042 return True
Brad Bishopd7bf8c12018-02-25 22:55:05 -05002043 self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002044 self.rq.fakeworker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002045 else:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002046 self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata, self.rqdata.setscene_enforce)) + b"</runtask>")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002047 self.rq.worker[mc].process.stdin.flush()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002048
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002049 self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
2050 self.build_stamps2.append(self.build_stamps[task])
2051 self.runq_running.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002052 self.stats.taskActive()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002053 if self.can_start_task():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002054 return True
2055
2056 if self.stats.active > 0:
2057 self.rq.read_workers()
2058 return self.rq.active_fds()
2059
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002060 if len(self.failed_tids) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002061 self.rq.state = runQueueFailed
2062 return True
2063
2064 # Sanity Checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002065 for task in self.rqdata.runtaskentries:
2066 if task not in self.runq_buildable:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002067 logger.error("Task %s never buildable!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002068 if task not in self.runq_running:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002069 logger.error("Task %s never ran!", task)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002070 if task not in self.runq_complete:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002071 logger.error("Task %s never completed!", task)
2072 self.rq.state = runQueueComplete
2073
2074 return True
2075
2076 def build_taskdepdata(self, task):
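        """
        Build the dependency data handed to the worker for 'task': a dict
        mapping the task and each of its transitive dependencies to
        [pn, taskname, fn, deps, provides, taskhash].
        """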
2077 taskdepdata = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002078 next = self.rqdata.runtaskentries[task].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002079 next.add(task)
2080 while next:
2081 additional = []
2082 for revdep in next:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002083 (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
2084 pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
2085 deps = self.rqdata.runtaskentries[revdep].depends
2086 provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002087 taskhash = self.rqdata.runtaskentries[revdep].hash
2088 taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002089 for revdep2 in deps:
2090 if revdep2 not in taskdepdata:
2091 additional.append(revdep2)
2092 next = additional
2093
2094 #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
2095 return taskdepdata
2096
2097class RunQueueExecuteScenequeue(RunQueueExecute):
2098 def __init__(self, rq):
2099 RunQueueExecute.__init__(self, rq)
2100
2101 self.scenequeue_covered = set()
2102 self.scenequeue_notcovered = set()
2103 self.scenequeue_notneeded = set()
2104
2105 # If we don't have any setscene functions, skip this step
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002106 if len(self.rqdata.runq_setscene_tids) == 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002107 rq.scenequeue_covered = set()
Brad Bishop1a4b7ee2018-12-16 17:11:34 -08002108 rq.scenequeue_notcovered = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002109 rq.state = runQueueRunInit
2110 return
2111
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002112 self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002113
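        # Working structures for collapsing the full task graph onto the
        # setscene tasks (roughly): sq_revdeps starts as a copy of each task's
        # reverse dependencies and is pruned as chains are walked,
        # sq_revdeps_new accumulates the setscene tasks reached through each
        # node, and sq_revdeps_squash ends up holding the collapsed graph keyed
        # only by setscene tids.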
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002114 sq_revdeps = {}
2115 sq_revdeps_new = {}
2116 sq_revdeps_squash = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002117 self.sq_harddeps = {}
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002118 self.stamps = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002119
2120 # We need to construct a dependency graph for the setscene functions. Intermediate
2121 # dependencies between the setscene tasks only complicate the code. This code
2122 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
2123 # only containing the setscene functions.
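        # Roughly speaking: if a setscene task covers a chain of ordinary tasks
        # (e.g. fetch -> unpack -> ... -> the corresponding real task), the
        # collapsed graph only records the dependencies between the setscene
        # tasks themselves, with the intermediate chain folded away.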
2124
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002125 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002126
2127 # First process the chains up to the first setscene task.
2128 endpoints = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002129 for tid in self.rqdata.runtaskentries:
2130 sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
2131 sq_revdeps_new[tid] = set()
2132 if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
2133 #bb.warn("Added endpoint %s" % (tid))
2134 endpoints[tid] = set()
2135
2136 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002137
2138 # Secondly process the chains between setscene tasks.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002139 for tid in self.rqdata.runq_setscene_tids:
2140 #bb.warn("Added endpoint 2 %s" % (tid))
2141 for dep in self.rqdata.runtaskentries[tid].depends:
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002142 if tid in sq_revdeps[dep]:
2143 sq_revdeps[dep].remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002144 if dep not in endpoints:
2145 endpoints[dep] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002146 #bb.warn(" Added endpoint 3 %s" % (dep))
2147 endpoints[dep].add(tid)
2148
2149 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002150
2151 def process_endpoints(endpoints):
2152 newendpoints = {}
2153 for point, task in endpoints.items():
2154 tasks = set()
2155 if task:
2156 tasks |= task
2157 if sq_revdeps_new[point]:
2158 tasks |= sq_revdeps_new[point]
2159 sq_revdeps_new[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002160 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002161 sq_revdeps_new[point] = tasks
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05002162 tasks = set()
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002163 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002164 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002165 if point in sq_revdeps[dep]:
2166 sq_revdeps[dep].remove(point)
2167 if tasks:
2168 sq_revdeps_new[dep] |= tasks
Brad Bishop6e60e8b2018-02-01 10:27:11 -05002169 if len(sq_revdeps[dep]) == 0 and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002170 newendpoints[dep] = task
2171 if len(newendpoints) != 0:
2172 process_endpoints(newendpoints)
2173
2174 process_endpoints(endpoints)
2175
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002176 self.rqdata.init_progress_reporter.next_stage()
2177
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002178 # Build a list of setscene tasks which are "unskippable"
2179 # These are direct endpoints referenced by the build
2180 endpoints2 = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002181 sq_revdeps2 = {}
2182 sq_revdeps_new2 = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002183 def process_endpoints2(endpoints):
2184 newendpoints = {}
2185 for point, task in endpoints.items():
2186 tasks = set([point])
2187 if task:
2188 tasks |= task
2189 if sq_revdeps_new2[point]:
2190 tasks |= sq_revdeps_new2[point]
2191 sq_revdeps_new2[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002192 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002193 sq_revdeps_new2[point] = tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002194 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002195 if point in sq_revdeps2[dep]:
2196 sq_revdeps2[dep].remove(point)
2197 if tasks:
2198 sq_revdeps_new2[dep] |= tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002199 if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002200 newendpoints[dep] = tasks
2201 if len(newendpoints) != 0:
2202 process_endpoints2(newendpoints)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002203 for tid in self.rqdata.runtaskentries:
2204 sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
2205 sq_revdeps_new2[tid] = set()
2206 if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
2207 endpoints2[tid] = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002208 process_endpoints2(endpoints2)
2209 self.unskippable = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002210 for tid in self.rqdata.runq_setscene_tids:
2211 if sq_revdeps_new2[tid]:
2212 self.unskippable.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002213
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002214 self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
2215
2216 for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
2217 if tid in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002218 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002219 for dep in sq_revdeps_new[tid]:
2220 deps.add(dep)
2221 sq_revdeps_squash[tid] = deps
2222 elif len(sq_revdeps_new[tid]) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002223 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002224 self.rqdata.init_progress_reporter.update(taskcounter)
2225
2226 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002227
2228 # Resolve setscene inter-task dependencies
2229 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
2230 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002231 for tid in self.rqdata.runq_setscene_tids:
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002232 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2233 realtid = tid + "_setscene"
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002234 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
Brad Bishop37a0e4d2017-12-04 01:01:44 -05002235 self.stamps[tid] = bb.build.stampfile(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn, noextra=True)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002236 for (depname, idependtask) in idepends:
2237
2238 if depname not in self.rqdata.taskData[mc].build_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002239 continue
2240
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002241 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
2242 if depfn is None:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002243 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002244 deptid = depfn + ":" + idependtask.replace("_setscene", "")
2245 if deptid not in self.rqdata.runtaskentries:
2246 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002247
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002248 if deptid not in self.sq_harddeps:
2249 self.sq_harddeps[deptid] = set()
2250 self.sq_harddeps[deptid].add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002251
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002252 sq_revdeps_squash[tid].add(deptid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002253 # Have to zero this to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002254 sq_revdeps_squash[deptid] = set()
2255
2256 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002257
2258 for task in self.sq_harddeps:
2259 for dep in self.sq_harddeps[task]:
2260 sq_revdeps_squash[dep].add(task)
2261
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002262 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002263
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002264 #for tid in sq_revdeps_squash:
2265 # for dep in sq_revdeps_squash[tid]:
2266 # data = data + "\n %s" % dep
2267 # bb.warn("Task %s_setscene: is %s " % (tid, data))
2268
        self.sq_deps = {}
        self.sq_revdeps = sq_revdeps_squash
        self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)

        for tid in self.sq_revdeps:
            self.sq_deps[tid] = set()
        for tid in self.sq_revdeps:
            for dep in self.sq_revdeps[tid]:
                self.sq_deps[dep].add(tid)

        self.rqdata.init_progress_reporter.next_stage()

        for tid in self.sq_revdeps:
            if len(self.sq_revdeps[tid]) == 0:
                self.runq_buildable.add(tid)

        self.rqdata.init_progress_reporter.finish()

        self.outrightfail = []
        if self.rq.hashvalidate:
            sq_hash = []
            sq_hashfn = []
            sq_fn = []
            sq_taskname = []
            sq_task = []
            noexec = []
            stamppresent = []
            for tid in self.sq_revdeps:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    noexec.append(tid)
                    self.task_skip(tid)
                    bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
                    continue

                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    stamppresent.append(tid)
                    self.task_skip(tid)
                    continue

                sq_fn.append(fn)
                sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[taskfn])
                sq_hash.append(self.rqdata.runtaskentries[tid].hash)
                sq_taskname.append(taskname)
                sq_task.append(tid)

            self.cooker.data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", len(stamppresent))

            call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
            valid = bb.utils.better_eval(call, locs)

            self.cooker.data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")

            valid_new = stamppresent
            for v in valid:
                valid_new.append(sq_task[v])

            for tid in self.sq_revdeps:
                if tid not in valid_new and tid not in noexec:
                    logger.debug(2, 'No package found, so skipping setscene task %s', tid)
                    self.outrightfail.append(tid)

        logger.info('Executing SetScene Tasks')

        self.rq.state = runQueueSceneRun

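    # Note on the hash validation hook used above: self.rq.hashvalidate names a
    # configured validation function which is invoked via bb.utils.better_eval()
    # with sq_fn, sq_task, sq_hash, sq_hashfn and d in scope.  Judging purely from
    # that call site, it is expected to return a list of indices identifying the
    # entries whose setscene output is already available.  A minimal, hypothetical
    # sketch (artefact_available() is a placeholder, not a real API):
    #
    #   def example_hashcheck(sq_fn, sq_task, sq_hash, sq_hashfn, d):
    #       valid = []
    #       for i in range(len(sq_fn)):
    #           if artefact_available(sq_hash[i]):
    #               valid.append(i)
    #       return valid
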
    def scenequeue_updatecounters(self, task, fail = False):
        for dep in self.sq_deps[task]:
            if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
                logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.scenequeue_updatecounters(dep, fail)
                continue
            if task not in self.sq_revdeps2[dep]:
                # May already have been removed by the fail case above
                continue
            self.sq_revdeps2[dep].remove(task)
            if len(self.sq_revdeps2[dep]) == 0:
                self.runq_buildable.add(dep)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)

    def check_taskfail(self, task):
        if self.rqdata.setscenewhitelist is not None:
            realtask = task.split('_setscene')[0]
            (mc, fn, taskname, taskfn) = split_tid_mcfn(realtask)
            pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
            if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
                self.rq.state = runQueueCleanUp

    def task_complete(self, task):
        self.stats.taskCompleted()
        bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, result):
        self.stats.taskFailed()
        bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)
        self.check_taskfail(task)

    def task_failoutright(self, task):
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()
        self.scenequeue_notcovered.add(task)
        self.scenequeue_updatecounters(task, True)

    def task_skip(self, task):
        self.runq_running.add(task)
        self.runq_buildable.add(task)
        self.task_completeoutright(task)
        self.stats.taskSkipped()
        self.stats.taskCompleted()

    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        """

        self.rq.read_workers()

        task = None
        if self.can_start_task():
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            taskdepdata = self.build_taskdepdata(task)

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not mc in self.rq.fakeworker:
                    self.rq.start_fakeworker(self, mc)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), taskdepdata, False)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            self.build_stamps2.append(self.build_stamps[task])
            self.runq_running.add(task)
            self.stats.taskActive()
            if self.can_start_task():
                return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', "\n".join(sorted(self.rq.scenequeue_covered)))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True

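    # The execute() method above hands a setscene task to a worker by writing a
    # framed, pickled tuple to the worker process's stdin:
    #
    #   b"<runtask>" + pickle.dumps((taskfn, task, taskname, ...)) + b"</runtask>"
    #
    # Messages travelling in the opposite direction use the same framing style
    # (<event>...</event> and <exitcode>...</exitcode>) and are decoded in
    # runQueuePipe.read() further down in this file.
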
    def runqueue_process_waitpid(self, task, status):
        RunQueueExecute.runqueue_process_waitpid(self, task, status)


    def build_taskdepdata(self, task):
        def getsetscenedeps(tid):
            deps = set()
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            realtid = tid + "_setscene"
            idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
            for (depname, idependtask) in idepends:
                if depname not in self.rqdata.taskData[mc].build_targets:
                    continue

                depfn = self.rqdata.taskData[mc].build_targets[depname][0]
                if depfn is None:
                    continue
                deptid = depfn + ":" + idependtask.replace("_setscene", "")
                deps.add(deptid)
            return deps

        taskdepdata = {}
        next = getsetscenedeps(task)
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
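
    # For reference, build_taskdepdata() maps each tid to
    # [pn, taskname, fn, deps, provides, taskhash].  A hypothetical entry
    # (paths, names and hash below are illustrative only) could look like:
    #
    #   "/path/to/zlib_1.2.11.bb:do_populate_sysroot": [
    #       "zlib", "do_populate_sysroot", "/path/to/zlib_1.2.11.bb",
    #       {"/path/to/libtool-cross_2.4.6.bb:do_populate_sysroot"},
    #       ["zlib"], "f2d3...e9a1"]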

class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        self.args = x


class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        bb.event.Event.__init__(self)

class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        self.taskid = task
        self.taskstring = task
        self.taskname = taskname_from_tid(task)
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        self.stats = stats.copy()
        bb.event.Event.__init__(self)

class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.taskstring = task + "_setscene"
        self.taskname = taskname_from_tid(task) + "_setscene"
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)

class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        runQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.noexec = noexec

class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)

class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        sceneQueueEvent.__init__(self, task, stats, rq)
        self.exitcode = exitcode

    def __str__(self):
        return "Setscene task (%s) failed with exit code '%s' - real task will be run instead" % (self.taskstring, self.exitcode)

class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        self.stats = stats.copy()
        bb.event.Event.__init__(self)

class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed
    """

class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed
    """

class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason

class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        self.rqexec = rqexec

    def read(self):
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()

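# The framing parsed by runQueuePipe.read() above is intentionally simple: every
# message from a worker is a pickled payload wrapped in fixed byte markers, e.g.
# (illustrative sketch only):
#
#   msg = b"<event>" + pickle.dumps(some_event) + b"</event>"
#
# read() looks for the closing marker, unpickles the slice between the markers
# (self.queue[7:index] skips the 7-byte b"<event>" prefix) and then discards
# index + 8 bytes, i.e. the payload plus the 8-byte b"</event>" suffix.  The
# <exitcode> messages are handled the same way with 10/11-byte markers.
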
def get_setscene_enforce_whitelist(d):
    if d.getVar('BB_SETSCENE_ENFORCE') != '1':
        return None
    whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split()
    outlist = []
    for item in whitelist[:]:
        if item.startswith('%:'):
            for target in sys.argv[1:]:
                if not target.startswith('-'):
                    outlist.append(target.split(':')[0] + ':' + item.split(':')[1])
        else:
            outlist.append(item)
    return outlist

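# get_setscene_enforce_whitelist() only returns a list when BB_SETSCENE_ENFORCE
# is set to "1".  Whitelist entries are "pn:taskname" patterns; an entry of the
# form "%:<task>" is expanded to "<target>:<task>" for every non-option target
# given on the command line.  A hypothetical configuration (values illustrative
# only) might be:
#
#   BB_SETSCENE_ENFORCE = "1"
#   BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir"
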
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    import fnmatch
    if whitelist is not None:
        item = '%s:%s' % (pn, taskname)
        for whitelist_item in whitelist:
            if fnmatch.fnmatch(item, whitelist_item):
                return True
        return False
    return True
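
# check_setscene_enforce_whitelist() matches "pn:taskname" against each pattern
# with fnmatch, so (hypothetical values, illustrative only):
#
#   check_setscene_enforce_whitelist("zlib", "do_fetch", ["zlib:*"])   # -> True
#   check_setscene_enforce_whitelist("zlib", "do_fetch", ["glibc:*"])  # -> False
#   check_setscene_enforce_whitelist("zlib", "do_fetch", None)         # -> True (no enforcement)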
2731 return True