blob: 84b268580fc2fba1a81c1867d8ca72267c126d04 [file] [log] [blame]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001#!/usr/bin/env python
2# ex:ts=4:sw=4:sts=4:et
3# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
4"""
5BitBake 'RunQueue' implementation
6
7Handles preparation and execution of a queue of tasks
8"""
9
10# Copyright (C) 2006-2007 Richard Purdie
11#
12# This program is free software; you can redistribute it and/or modify
13# it under the terms of the GNU General Public License version 2 as
14# published by the Free Software Foundation.
15#
16# This program is distributed in the hope that it will be useful,
17# but WITHOUT ANY WARRANTY; without even the implied warranty of
18# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19# GNU General Public License for more details.
20#
21# You should have received a copy of the GNU General Public License along
22# with this program; if not, write to the Free Software Foundation, Inc.,
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24
25import copy
26import os
27import sys
28import signal
29import stat
30import fcntl
31import errno
32import logging
33import re
34import bb
35from bb import msg, data, event
36from bb import monitordisk
37import subprocess
Patrick Williamsc0f7c042017-02-23 20:41:17 -060038import pickle
Patrick Williamsc124f4f2015-09-15 14:41:29 -050039
# Module loggers: "BitBake" is the parent logger shared with the rest of
# BitBake; "BitBake.RunQueue" is the child used throughout this module.
bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")

# Matches a standalone 32-character hex string (an md5 checksum),
# case-insensitively; the lookarounds reject matches embedded inside a
# longer alphanumeric run.
__find_md5__ = re.compile( r'(?i)(?<![a-z0-9])[a-f0-9]{32}(?![a-z0-9])' )
44
def fn_from_tid(tid):
    """Return the filename portion of a task id of the form "<fn>:<taskname>"."""
    parts = tid.rsplit(":", 1)
    return parts[0]
47
def taskname_from_tid(tid):
    """Return the task-name portion of a task id of the form "<fn>:<taskname>"."""
    parts = tid.rsplit(":", 1)
    return parts[1]
50
def split_tid(tid):
    """Split a task id into (mc, fn, taskname), discarding the mc-prefixed
    filename that split_tid_mcfn also computes."""
    mc, fn, taskname, _mcfn = split_tid_mcfn(tid)
    return (mc, fn, taskname)
54
def split_tid_mcfn(tid):
    """Split a task id into its (mc, fn, taskname, mcfn) components.

    Multiconfig ids look like "multiconfig:<mc>:<fn>:<taskname>"; plain ids
    are "<fn>:<taskname>" with an empty mc, in which case mcfn is just fn.
    """
    if tid.startswith('multiconfig:'):
        fields = tid.split(':')
        mc = fields[1]
        taskname = fields[-1]
        # fn may itself contain colons, so rejoin everything in between.
        fn = ":".join(fields[2:-1])
        mcfn = "multiconfig:" + mc + ":" + fn
        return (mc, fn, taskname, mcfn)

    parts = tid.rsplit(":", 1)
    return ("", parts[0], parts[1], parts[0])
70
def build_tid(mc, fn, taskname):
    """Construct a task id from its components (inverse of split_tid_mcfn)."""
    if not mc:
        return fn + ":" + taskname
    return "multiconfig:" + mc + ":" + fn + ":" + taskname
75
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # Counters for each terminal state plus the currently-running count.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def copy(self):
        """Return a duplicate RunQueueStats carrying the same counter values."""
        duplicate = self.__class__(self.total)
        duplicate.__dict__.update(self.__dict__)
        return duplicate

    def taskFailed(self):
        """Record that one active task failed."""
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number = 1):
        """Record that `number` active tasks finished successfully."""
        self.active -= number
        self.completed += number

    def taskSkipped(self, number = 1):
        """Record `number` skipped tasks; they are counted as active until
        taskCompleted() is subsequently called for them."""
        self.active += number
        self.skipped += number

    def taskActive(self):
        """Record that one more task has started executing."""
        self.active += 1
106
# These values indicate the next step due to be run in the
# runQueue state machine. The RunQueue execution code elsewhere in this
# file advances through these numbered states; the names describe each
# phase (preparation, setscene init/run, real-task init/run, then the
# failure / cleanup / completion end states).
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueSceneRun = 4
runQueueRunInit = 5
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
runQueueComplete = 9
117
class RunQueueScheduler(object):
    """
    Control the order tasks are scheduled in.

    The base scheduler keeps the priority map in task-number order; the
    subclasses below re-sort prio_map to implement other policies.
    """
    name = "basic"

    def __init__(self, runqueue, rqdata):
        """
        The default scheduler just returns the first buildable task (the
        priority map is sorted by task number)
        """
        self.rq = runqueue
        self.rqdata = rqdata
        self.numTasks = len(self.rqdata.runtaskentries)

        # Bug fix: materialise the keys as a flat list of tids. The previous
        # code was [self.rqdata.runtaskentries.keys()], a one-element list
        # containing a dict_keys view, which made prio_map.index(tid) in
        # next_buildable_task() raise ValueError.
        self.prio_map = list(self.rqdata.runtaskentries.keys())

        self.buildable = []
        # tid -> stamp file path, used to avoid running two tasks that would
        # write the same stamp at the same time.
        self.stamps = {}
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            self.stamps[tid] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
            if tid in self.rq.runq_buildable:
                self.buildable.append(tid)

        # Built lazily on first use in next_buildable_task().
        self.rev_prio_map = None

    def next_buildable_task(self):
        """
        Return the id of the first task we find that is buildable
        """
        # Drop anything that has started running since the last call.
        self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
        if not self.buildable:
            return None
        if len(self.buildable) == 1:
            tid = self.buildable[0]
            stamp = self.stamps[tid]
            # Don't return a task whose stamp file is already being written
            # by a currently-running task.
            if stamp not in self.rq.build_stamps.values():
                return tid

        if not self.rev_prio_map:
            # Invert prio_map once: tid -> position (lower == higher priority).
            self.rev_prio_map = {}
            for tid in self.rqdata.runtaskentries:
                self.rev_prio_map[tid] = self.prio_map.index(tid)

        best = None
        bestprio = None
        for tid in self.buildable:
            prio = self.rev_prio_map[tid]
            if bestprio is None or bestprio > prio:
                stamp = self.stamps[tid]
                if stamp in self.rq.build_stamps.values():
                    continue
                bestprio = prio
                best = tid

        return best

    def next(self):
        """
        Return the id of the task we should build next
        """
        # Implicitly returns None when the active-task limit is reached.
        if self.rq.stats.active < self.rq.number_tasks:
            return self.next_buildable_task()

    def newbuilable(self, task):
        # NOTE: method name is a historical typo ("newbuilable") kept
        # unchanged because external callers use it.
        self.buildable.append(task)
185
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    name = "speed"

    def __init__(self, runqueue, rqdata):
        """
        The priority map is sorted by task weight.
        """
        RunQueueScheduler.__init__(self, runqueue, rqdata)

        # Bucket the task ids by their computed weight...
        buckets = {}
        for tid in self.rqdata.runtaskentries:
            w = self.rqdata.runtaskentries[tid].weight
            buckets.setdefault(w, []).append(tid)

        # ...then emit buckets in ascending weight order and reverse the
        # result, so the heaviest tasks end up at the front of prio_map.
        self.prio_map = []
        for w in sorted(buckets):
            self.prio_map.extend(buckets[w])
        self.prio_map.reverse()
212
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files are quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, it's completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    name = "completion"

    def __init__(self, runqueue, rqdata):
        RunQueueSchedulerSpeed.__init__(self, runqueue, rqdata)

        #FIXME - whilst this groups all fns together it does not reorder the
        #fn groups optimally.

        # Repeatedly take the highest-priority remaining task, then pull every
        # other task belonging to the same file directly in behind it.
        remaining = copy.deepcopy(self.prio_map)
        self.prio_map = []
        while remaining:
            head = remaining.pop(0)
            self.prio_map.append(head)
            fn = fn_from_tid(head)
            self.prio_map.extend([tid for tid in remaining if fn_from_tid(tid) == fn])
            remaining = [tid for tid in remaining if fn_from_tid(tid) != fn]
244
class RunTaskEntry(object):
    """
    Per-task record held in RunQueueData.runtaskentries.
    """
    def __init__(self):
        # Forward and reverse dependency tid sets, filled in by prepare().
        self.depends = set()
        self.revdeps = set()
        # Populated later: signature hash, taskData task reference and the
        # scheduling weight (defaults to 1 until calculate_task_weights runs).
        self.hash = None
        self.task = None
        self.weight = 1
252
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500253class RunQueueData:
254 """
255 BitBake Run Queue implementation
256 """
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600257 def __init__(self, rq, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500258 self.cooker = cooker
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600259 self.dataCaches = dataCaches
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500260 self.taskData = taskData
261 self.targets = targets
262 self.rq = rq
263 self.warn_multi_bb = False
264
265 self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST", True) or ""
266 self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST", True) or "").split()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600267 self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData)
268 self.setscenewhitelist_checked = False
269 self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500270
271 self.reset()
272
273 def reset(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600274 self.runtaskentries = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500275
276 def runq_depends_names(self, ids):
277 import re
278 ret = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600279 for id in ids:
280 nam = os.path.basename(id)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500281 nam = re.sub("_[^,]*,", ",", nam)
282 ret.extend([nam])
283 return ret
284
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600285 def get_task_hash(self, tid):
286 return self.runtaskentries[tid].hash
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500287
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600288 def get_user_idstring(self, tid, task_name_suffix = ""):
289 return tid + task_name_suffix
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500290
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500291 def get_short_user_idstring(self, task, task_name_suffix = ""):
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600292 (mc, fn, taskname, _) = split_tid_mcfn(task)
293 pn = self.dataCaches[mc].pkg_fn[fn]
294 taskname = taskname_from_tid(task) + task_name_suffix
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500295 return "%s:%s" % (pn, taskname)
296
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        tasks: iterable of unbuildable task ids to start the search from.
        Returns a list of message strings, one group per dependency loop found
        (capped at 10 loops).
        """
        # NOTE(review): deepcopy is imported here but the code below uses
        # copy.deepcopy from the module-level import; this local import
        # appears unused.
        from copy import deepcopy

        valid_chains = []    # canonical forms of loops already reported
        explored_deps = {}   # tid -> accumulated reverse deps (memoisation)
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            """
            # Rotating to a canonical starting point lets the same loop,
            # discovered from different entry tasks, compare equal below.
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(tid, prev_chain):
            # Depth-first walk along reverse dependencies carrying the path
            # walked so far; meeting a tid already on the path means a loop.
            prev_chain.append(tid)
            total_deps = []
            total_deps.extend(self.runtaskentries[tid].revdeps)
            for revdep in self.runtaskentries[tid].revdeps:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (dependent Tasks %s)\n" % (dep, self.runq_depends_names(self.runtaskentries[dep].depends)))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        # Cap the output and abandon this branch of the search.
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    # Never visited: must recurse.
                    scan = True
                elif revdep in explored_deps[revdep]:
                    # revdep reaches itself, i.e. it sits on a loop: re-scan.
                    scan = True
                else:
                    # Re-scan if anything on the current path was reachable
                    # from revdep during an earlier visit.
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    # deepcopy so sibling branches don't see this path's tids.
                    find_chains(revdep, copy.deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[tid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
383
384 def calculate_task_weights(self, endpoints):
385 """
386 Calculate a number representing the "weight" of each task. Heavier weighted tasks
387 have more dependencies and hence should be executed sooner for maximum speed.
388
389 This function also sanity checks the task list finding tasks that are not
390 possible to execute due to circular dependencies.
391 """
392
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600393 numTasks = len(self.runtaskentries)
394 weight = {}
395 deps_left = {}
396 task_done = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500397
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600398 for tid in self.runtaskentries:
399 task_done[tid] = False
400 weight[tid] = 1
401 deps_left[tid] = len(self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500402
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600403 for tid in endpoints:
404 weight[tid] = 10
405 task_done[tid] = True
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500406
407 while True:
408 next_points = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600409 for tid in endpoints:
410 for revdep in self.runtaskentries[tid].depends:
411 weight[revdep] = weight[revdep] + weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500412 deps_left[revdep] = deps_left[revdep] - 1
413 if deps_left[revdep] == 0:
414 next_points.append(revdep)
415 task_done[revdep] = True
416 endpoints = next_points
417 if len(next_points) == 0:
418 break
419
420 # Circular dependency sanity check
421 problem_tasks = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600422 for tid in self.runtaskentries:
423 if task_done[tid] is False or deps_left[tid] != 0:
424 problem_tasks.append(tid)
425 logger.debug(2, "Task %s is not buildable", tid)
426 logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
427 self.runtaskentries[tid].weight = weight[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500428
429 if problem_tasks:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600430 message = "%s unbuildable tasks were found.\n" % len(problem_tasks)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500431 message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
432 message = message + "Identifying dependency loops (this may take a short while)...\n"
433 logger.error(message)
434
435 msgs = self.circular_depchains_handler(problem_tasks)
436
437 message = "\n"
438 for msg in msgs:
439 message = message + msg
440 bb.msg.fatal("RunQueue", message)
441
442 return weight
443
444 def prepare(self):
445 """
446 Turn a set of taskData into a RunQueue and compute data needed
447 to optimise the execution order.
448 """
449
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600450 runq_build = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500451 recursivetasks = {}
452 recursiveitasks = {}
453 recursivetasksselfref = set()
454
455 taskData = self.taskData
456
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600457 found = False
458 for mc in self.taskData:
459 if len(taskData[mc].taskentries) > 0:
460 found = True
461 break
462 if not found:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500463 # Nothing to do
464 return 0
465
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600466 self.init_progress_reporter.start()
467 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500468
469 # Step A - Work out a list of tasks to run
470 #
471 # Taskdata gives us a list of possible providers for every build and run
472 # target ordered by priority. It also gives information on each of those
473 # providers.
474 #
475 # To create the actual list of tasks to execute we fix the list of
476 # providers and then resolve the dependencies into task IDs. This
477 # process is repeated for each type of dependency (tdepends, deptask,
478 # rdeptast, recrdeptask, idepends).
479
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600480 def add_build_dependencies(depids, tasknames, depends, mc):
481 for depname in depids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500482 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600483 if depname not in taskData[mc].build_targets or not taskData[mc].build_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500484 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600485 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500486 if depdata is None:
487 continue
488 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600489 t = depdata + ":" + taskname
490 if t in taskData[mc].taskentries:
491 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500492
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600493 def add_runtime_dependencies(depids, tasknames, depends, mc):
494 for depname in depids:
495 if depname not in taskData[mc].run_targets or not taskData[mc].run_targets[depname]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500496 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600497 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500498 if depdata is None:
499 continue
500 for taskname in tasknames:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600501 t = depdata + ":" + taskname
502 if t in taskData[mc].taskentries:
503 depends.add(t)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500504
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600505 def add_resolved_dependencies(mc, fn, tasknames, depends):
506 for taskname in tasknames:
507 tid = build_tid(mc, fn, taskname)
508 if tid in self.runtaskentries:
509 depends.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500510
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600511 for mc in taskData:
512 for tid in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500513
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600514 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
515 #runtid = build_tid(mc, fn, taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500516
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600517 #logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
518
519 depends = set()
520 task_deps = self.dataCaches[mc].task_deps[taskfn]
521
522 self.runtaskentries[tid] = RunTaskEntry()
523
524 if fn in taskData[mc].failed_fns:
525 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500526
527 # Resolve task internal dependencies
528 #
529 # e.g. addtask before X after Y
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600530 for t in taskData[mc].taskentries[tid].tdepends:
531 (_, depfn, deptaskname, _) = split_tid_mcfn(t)
532 depends.add(build_tid(mc, depfn, deptaskname))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500533
534 # Resolve 'deptask' dependencies
535 #
536 # e.g. do_sometask[deptask] = "do_someothertask"
537 # (makes sure sometask runs after someothertask of all DEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600538 if 'deptask' in task_deps and taskname in task_deps['deptask']:
539 tasknames = task_deps['deptask'][taskname].split()
540 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500541
542 # Resolve 'rdeptask' dependencies
543 #
544 # e.g. do_sometask[rdeptask] = "do_someothertask"
545 # (makes sure sometask runs after someothertask of all RDEPENDS)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600546 if 'rdeptask' in task_deps and taskname in task_deps['rdeptask']:
547 tasknames = task_deps['rdeptask'][taskname].split()
548 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500549
550 # Resolve inter-task dependencies
551 #
552 # e.g. do_sometask[depends] = "targetname:do_someothertask"
553 # (makes sure sometask runs after targetname's someothertask)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600554 idepends = taskData[mc].taskentries[tid].idepends
555 for (depname, idependtask) in idepends:
556 if depname in taskData[mc].build_targets and taskData[mc].build_targets[depname] and not depname in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500557 # Won't be in build_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600558 depdata = taskData[mc].build_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500559 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600560 t = depdata + ":" + idependtask
561 depends.add(t)
562 if t not in taskData[mc].taskentries:
563 bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
564 irdepends = taskData[mc].taskentries[tid].irdepends
565 for (depname, idependtask) in irdepends:
566 if depname in taskData[mc].run_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500567 # Won't be in run_targets if ASSUME_PROVIDED
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600568 depdata = taskData[mc].run_targets[depname][0]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500569 if depdata is not None:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600570 t = depdata + ":" + idependtask
571 depends.add(t)
572 if t not in taskData[mc].taskentries:
573 bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskname, fn, idependtask, depdata))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500574
575 # Resolve recursive 'recrdeptask' dependencies (Part A)
576 #
577 # e.g. do_sometask[recrdeptask] = "do_someothertask"
578 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
579 # We cover the recursive part of the dependencies below
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600580 if 'recrdeptask' in task_deps and taskname in task_deps['recrdeptask']:
581 tasknames = task_deps['recrdeptask'][taskname].split()
582 recursivetasks[tid] = tasknames
583 add_build_dependencies(taskData[mc].depids[taskfn], tasknames, depends, mc)
584 add_runtime_dependencies(taskData[mc].rdepids[taskfn], tasknames, depends, mc)
585 if taskname in tasknames:
586 recursivetasksselfref.add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500587
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600588 if 'recideptask' in task_deps and taskname in task_deps['recideptask']:
589 recursiveitasks[tid] = []
590 for t in task_deps['recideptask'][taskname].split():
591 newdep = build_tid(mc, fn, t)
592 recursiveitasks[tid].append(newdep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500593
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600594 self.runtaskentries[tid].depends = depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500595
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600596 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500597
598 # Resolve recursive 'recrdeptask' dependencies (Part B)
599 #
600 # e.g. do_sometask[recrdeptask] = "do_someothertask"
601 # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600602 # We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
603 self.init_progress_reporter.next_stage(len(recursivetasks))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500604 extradeps = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600605 for taskcounter, tid in enumerate(recursivetasks):
606 extradeps[tid] = set(self.runtaskentries[tid].depends)
607
608 tasknames = recursivetasks[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500609 seendeps = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500610
611 def generate_recdeps(t):
612 newdeps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600613 (mc, fn, taskname, _) = split_tid_mcfn(t)
614 add_resolved_dependencies(mc, fn, tasknames, newdeps)
615 extradeps[tid].update(newdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500616 seendeps.add(t)
617 newdeps.add(t)
618 for i in newdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600619 task = self.runtaskentries[i].task
620 for n in self.runtaskentries[i].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500621 if n not in seendeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600622 generate_recdeps(n)
623 generate_recdeps(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500624
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600625 if tid in recursiveitasks:
626 for dep in recursiveitasks[tid]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500627 generate_recdeps(dep)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600628 self.init_progress_reporter.update(taskcounter)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500629
630 # Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600631 for tid in recursivetasks:
632 extradeps[tid].difference_update(recursivetasksselfref)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500633
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600634 for tid in self.runtaskentries:
635 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500636 # Add in extra dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600637 if tid in extradeps:
638 self.runtaskentries[tid].depends = extradeps[tid]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500639 # Remove all self references
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600640 if tid in self.runtaskentries[tid].depends:
641 logger.debug(2, "Task %s contains self reference!", tid)
642 self.runtaskentries[tid].depends.remove(tid)
643
644 self.init_progress_reporter.next_stage()
645
646 #self.dump_data()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500647
648 # Step B - Mark all active tasks
649 #
650 # Start with the tasks we were asked to run and mark all dependencies
651 # as active too. If the task is to be 'forced', clear its stamp. Once
652 # all active tasks are marked, prune the ones we don't need.
653
654 logger.verbose("Marking Active Tasks")
655
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600656 def mark_active(tid, depth):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500657 """
658 Mark an item as active along with its depends
659 (calls itself recursively)
660 """
661
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600662 if tid in runq_build:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500663 return
664
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600665 runq_build[tid] = 1
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500666
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600667 depends = self.runtaskentries[tid].depends
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500668 for depend in depends:
669 mark_active(depend, depth+1)
670
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600671 self.target_tids = []
672 for (mc, target, task, fn) in self.targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500673
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600674 if target not in taskData[mc].build_targets or not taskData[mc].build_targets[target]:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500675 continue
676
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600677 if target in taskData[mc].failed_deps:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500678 continue
679
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500680 parents = False
681 if task.endswith('-'):
682 parents = True
683 task = task[:-1]
684
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600685 if fn in taskData[mc].failed_fns:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500686 continue
687
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600688 # fn already has mc prefix
689 tid = fn + ":" + task
690 self.target_tids.append(tid)
691 if tid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500692 import difflib
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600693 tasks = []
694 for x in taskData[mc].taskentries:
695 if x.startswith(fn + ":"):
696 tasks.append(taskname_from_tid(x))
697 close_matches = difflib.get_close_matches(task, tasks, cutoff=0.7)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500698 if close_matches:
699 extra = ". Close matches:\n %s" % "\n ".join(close_matches)
700 else:
701 extra = ""
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600702 bb.msg.fatal("RunQueue", "Task %s does not exist for target %s (%s)%s" % (task, target, tid, extra))
703
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500704 # For tasks called "XXXX-", ony run their dependencies
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500705 if parents:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600706 for i in self.runtaskentries[tid].depends:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500707 mark_active(i, 1)
708 else:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600709 mark_active(tid, 1)
710
711 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500712
713 # Step C - Prune all inactive tasks
714 #
715 # Once all active tasks are marked, prune the ones we don't need.
716
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500717 delcount = 0
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600718 for tid in list(self.runtaskentries.keys()):
719 if tid not in runq_build:
720 del self.runtaskentries[tid]
721 delcount += 1
722
723 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500724
725 #
726 # Step D - Sanity checks and computation
727 #
728
729 # Check to make sure we still have tasks to run
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600730 if len(self.runtaskentries) == 0:
731 if not taskData[''].abort:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500732 bb.msg.fatal("RunQueue", "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.")
733 else:
734 bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
735
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600736 logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runtaskentries))
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500737
738 logger.verbose("Assign Weightings")
739
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600740 self.init_progress_reporter.next_stage()
741
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500742 # Generate a list of reverse dependencies to ease future calculations
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600743 for tid in self.runtaskentries:
744 for dep in self.runtaskentries[tid].depends:
745 self.runtaskentries[dep].revdeps.add(tid)
746
747 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500748
749 # Identify tasks at the end of dependency chains
750 # Error on circular dependency loops (length two)
751 endpoints = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600752 for tid in self.runtaskentries:
753 revdeps = self.runtaskentries[tid].revdeps
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500754 if len(revdeps) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600755 endpoints.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500756 for dep in revdeps:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600757 if dep in self.runtaskentries[tid].depends:
758 bb.msg.fatal("RunQueue", "Task %s has circular dependency on %s" % (tid, dep))
759
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500760
761 logger.verbose("Compute totals (have %s endpoint(s))", len(endpoints))
762
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600763 self.init_progress_reporter.next_stage()
764
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500765 # Calculate task weights
766 # Check of higher length circular dependencies
767 self.runq_weight = self.calculate_task_weights(endpoints)
768
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600769 self.init_progress_reporter.next_stage()
770
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500771 # Sanity Check - Check for multiple tasks building the same provider
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600772 for mc in self.dataCaches:
773 prov_list = {}
774 seen_fn = []
775 for tid in self.runtaskentries:
776 (tidmc, fn, taskname, taskfn) = split_tid_mcfn(tid)
777 if taskfn in seen_fn:
778 continue
779 if mc != tidmc:
780 continue
781 seen_fn.append(taskfn)
782 for prov in self.dataCaches[mc].fn_provides[taskfn]:
783 if prov not in prov_list:
784 prov_list[prov] = [taskfn]
785 elif taskfn not in prov_list[prov]:
786 prov_list[prov].append(taskfn)
787 for prov in prov_list:
788 if len(prov_list[prov]) < 2:
789 continue
790 if prov in self.multi_provider_whitelist:
791 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500792 seen_pn = []
793 # If two versions of the same PN are being built its fatal, we don't support it.
794 for fn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600795 pn = self.dataCaches[mc].pkg_fn[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500796 if pn not in seen_pn:
797 seen_pn.append(pn)
798 else:
799 bb.fatal("Multiple versions of %s are due to be built (%s). Only one version of a given PN should be built in any given build. You likely need to set PREFERRED_VERSION_%s to select the correct version or don't depend on multiple versions." % (pn, " ".join(prov_list[prov]), pn))
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500800 msg = "Multiple .bb files are due to be built which each provide %s:\n %s" % (prov, "\n ".join(prov_list[prov]))
801 #
802 # Construct a list of things which uniquely depend on each provider
803 # since this may help the user figure out which dependency is triggering this warning
804 #
805 msg += "\nA list of tasks depending on these providers is shown and may help explain where the dependency comes from."
806 deplist = {}
807 commondeps = None
808 for provfn in prov_list[prov]:
809 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600810 for tid in self.runtaskentries:
811 fn = fn_from_tid(tid)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500812 if fn != provfn:
813 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600814 for dep in self.runtaskentries[tid].revdeps:
815 fn = fn_from_tid(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500816 if fn == provfn:
817 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600818 deps.add(dep)
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500819 if not commondeps:
820 commondeps = set(deps)
821 else:
822 commondeps &= deps
823 deplist[provfn] = deps
824 for provfn in deplist:
825 msg += "\n%s has unique dependees:\n %s" % (provfn, "\n ".join(deplist[provfn] - commondeps))
826 #
827 # Construct a list of provides and runtime providers for each recipe
828 # (rprovides has to cover RPROVIDES, PACKAGES, PACKAGES_DYNAMIC)
829 #
830 msg += "\nIt could be that one recipe provides something the other doesn't and should. The following provider and runtime provider differences may be helpful."
831 provide_results = {}
832 rprovide_results = {}
833 commonprovs = None
834 commonrprovs = None
835 for provfn in prov_list[prov]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600836 provides = set(self.dataCaches[mc].fn_provides[provfn])
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500837 rprovides = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600838 for rprovide in self.dataCaches[mc].rproviders:
839 if provfn in self.dataCaches[mc].rproviders[rprovide]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500840 rprovides.add(rprovide)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600841 for package in self.dataCaches[mc].packages:
842 if provfn in self.dataCaches[mc].packages[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500843 rprovides.add(package)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600844 for package in self.dataCaches[mc].packages_dynamic:
845 if provfn in self.dataCaches[mc].packages_dynamic[package]:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500846 rprovides.add(package)
847 if not commonprovs:
848 commonprovs = set(provides)
849 else:
850 commonprovs &= provides
851 provide_results[provfn] = provides
852 if not commonrprovs:
853 commonrprovs = set(rprovides)
854 else:
855 commonrprovs &= rprovides
856 rprovide_results[provfn] = rprovides
857 #msg += "\nCommon provides:\n %s" % ("\n ".join(commonprovs))
858 #msg += "\nCommon rprovides:\n %s" % ("\n ".join(commonrprovs))
859 for provfn in prov_list[prov]:
860 msg += "\n%s has unique provides:\n %s" % (provfn, "\n ".join(provide_results[provfn] - commonprovs))
861 msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
862
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500863 if self.warn_multi_bb:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600864 logger.warning(msg)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500865 else:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500866 logger.error(msg)
867
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600868 self.init_progress_reporter.next_stage()
869
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500870 # Create a whitelist usable by the stamp checks
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600871 self.stampfnwhitelist = {}
872 for mc in self.taskData:
873 self.stampfnwhitelist[mc] = []
874 for entry in self.stampwhitelist.split():
875 if entry not in self.taskData[mc].build_targets:
876 continue
877 fn = self.taskData.build_targets[entry][0]
878 self.stampfnwhitelist[mc].append(fn)
879
880 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500881
882 # Iterate over the task list looking for tasks with a 'setscene' function
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600883 self.runq_setscene_tids = []
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500884 if not self.cooker.configuration.nosetscene:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600885 for tid in self.runtaskentries:
886 (mc, fn, taskname, _) = split_tid_mcfn(tid)
887 setscenetid = fn + ":" + taskname + "_setscene"
888 if setscenetid not in taskData[mc].taskentries:
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500889 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600890 self.runq_setscene_tids.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500891
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600892 def invalidate_task(tid, error_nostamp):
893 (mc, fn, taskname, _) = split_tid_mcfn(tid)
894 taskdep = self.dataCaches[mc].task_deps[fn]
895 if fn + ":" + taskname not in taskData[mc].taskentries:
896 logger.warning("Task %s does not exist, invalidating this task will have no effect" % taskname)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500897 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
898 if error_nostamp:
899 bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
900 else:
901 bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
902 else:
903 logger.verbose("Invalidate task %s, %s", taskname, fn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600904 bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
905
906 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500907
908 # Invalidate task if force mode active
909 if self.cooker.configuration.force:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600910 for tid in self.target_tids:
911 invalidate_task(tid, False)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500912
913 # Invalidate task if invalidate mode active
914 if self.cooker.configuration.invalidate_stamp:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600915 for tid in self.target_tids:
916 fn = fn_from_tid(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500917 for st in self.cooker.configuration.invalidate_stamp.split(','):
918 if not st.startswith("do_"):
919 st = "do_%s" % st
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600920 invalidate_task(fn + ":" + st, True)
921
922 self.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500923
Patrick Williamsf1e5d692016-03-30 15:21:19 -0500924 # Create and print to the logs a virtual/xxxx -> PN (fn) table
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600925 for mc in taskData:
926 virtmap = taskData[mc].get_providermap(prefix="virtual/")
927 virtpnmap = {}
928 for v in virtmap:
929 virtpnmap[v] = self.dataCaches[mc].pkg_fn[virtmap[v]]
930 bb.debug(2, "%s resolved to: %s (%s)" % (v, virtpnmap[v], virtmap[v]))
931 if hasattr(bb.parse.siggen, "tasks_resolved"):
932 bb.parse.siggen.tasks_resolved(virtmap, virtpnmap, self.dataCaches[mc])
933
934 self.init_progress_reporter.next_stage()
Patrick Williamsf1e5d692016-03-30 15:21:19 -0500935
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500936 # Iterate over the task list and call into the siggen code
937 dealtwith = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600938 todeal = set(self.runtaskentries)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500939 while len(todeal) > 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600940 for tid in todeal.copy():
941 if len(self.runtaskentries[tid].depends - dealtwith) == 0:
942 dealtwith.add(tid)
943 todeal.remove(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500944 procdep = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600945 for dep in self.runtaskentries[tid].depends:
946 procdep.append(fn_from_tid(dep) + "." + taskname_from_tid(dep))
947 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
948 self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(taskfn, taskname, procdep, self.dataCaches[mc])
949 task = self.runtaskentries[tid].task
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500950
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500951 bb.parse.siggen.writeout_file_checksum_cache()
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500952
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600953 #self.dump_data()
954 return len(self.runtaskentries)
955
956 def dump_data(self):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500957 """
958 Dump some debug information on the internal data structures
959 """
960 logger.debug(3, "run_tasks:")
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600961 for tid in self.runtaskentries:
962 logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
963 self.runtaskentries[tid].weight,
964 self.runtaskentries[tid].depends,
965 self.runtaskentries[tid].revdeps)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500966
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600967class RunQueueWorker():
968 def __init__(self, process, pipe):
969 self.process = process
970 self.pipe = pipe
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500971
972class RunQueue:
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600973 def __init__(self, cooker, cfgData, dataCaches, taskData, targets):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500974
975 self.cooker = cooker
976 self.cfgData = cfgData
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600977 self.rqdata = RunQueueData(self, cooker, cfgData, dataCaches, taskData, targets)
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500978
979 self.stamppolicy = cfgData.getVar("BB_STAMP_POLICY", True) or "perfile"
980 self.hashvalidate = cfgData.getVar("BB_HASHCHECK_FUNCTION", True) or None
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600981 self.setsceneverify = cfgData.getVar("BB_SETSCENE_VERIFY_FUNCTION2", True) or None
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500982 self.depvalidate = cfgData.getVar("BB_SETSCENE_DEPVALID", True) or None
983
984 self.state = runQueuePrepare
985
986 # For disk space monitor
987 self.dm = monitordisk.diskMonitor(cfgData)
988
989 self.rqexe = None
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600990 self.worker = {}
991 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500992
Patrick Williamsc0f7c042017-02-23 20:41:17 -0600993 def _start_worker(self, mc, fakeroot = False, rqexec = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -0500994 logger.debug(1, "Starting bitbake-worker")
995 magic = "decafbad"
996 if self.cooker.configuration.profile:
997 magic = "decafbadbad"
998 if fakeroot:
Patrick Williamsd8c66bc2016-06-20 12:57:21 -0500999 magic = magic + "beef"
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001000 fakerootcmd = self.cfgData.getVar("FAKEROOTCMD", True)
1001 fakerootenv = (self.cfgData.getVar("FAKEROOTBASEENV", True) or "").split()
1002 env = os.environ.copy()
1003 for key, value in (var.split('=') for var in fakerootenv):
1004 env[key] = value
1005 worker = subprocess.Popen([fakerootcmd, "bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
1006 else:
1007 worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
1008 bb.utils.nonblockingfd(worker.stdout)
1009 workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
1010
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001011 runqhash = {}
1012 for tid in self.rqdata.runtaskentries:
1013 runqhash[tid] = self.rqdata.runtaskentries[tid].hash
1014
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001015 workerdata = {
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001016 "taskdeps" : self.rqdata.dataCaches[mc].task_deps,
1017 "fakerootenv" : self.rqdata.dataCaches[mc].fakerootenv,
1018 "fakerootdirs" : self.rqdata.dataCaches[mc].fakerootdirs,
1019 "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001020 "sigdata" : bb.parse.siggen.get_taskdata(),
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001021 "runq_hash" : runqhash,
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001022 "logdefaultdebug" : bb.msg.loggerDefaultDebugLevel,
1023 "logdefaultverbose" : bb.msg.loggerDefaultVerbose,
1024 "logdefaultverboselogs" : bb.msg.loggerVerboseLogs,
1025 "logdefaultdomain" : bb.msg.loggerDefaultDomains,
1026 "prhost" : self.cooker.prhost,
1027 "buildname" : self.cfgData.getVar("BUILDNAME", True),
1028 "date" : self.cfgData.getVar("DATE", True),
1029 "time" : self.cfgData.getVar("TIME", True),
1030 }
1031
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001032 worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
1033 worker.stdin.write(b"<workerdata>" + pickle.dumps(workerdata) + b"</workerdata>")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001034 worker.stdin.flush()
1035
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001036 return RunQueueWorker(worker, workerpipe)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001037
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001038 def _teardown_worker(self, worker):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001039 if not worker:
1040 return
1041 logger.debug(1, "Teardown for bitbake-worker")
1042 try:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001043 worker.process.stdin.write(b"<quit></quit>")
1044 worker.process.stdin.flush()
1045 worker.process.stdin.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001046 except IOError:
1047 pass
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001048 while worker.process.returncode is None:
1049 worker.pipe.read()
1050 worker.process.poll()
1051 while worker.pipe.read():
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001052 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001053 worker.pipe.close()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001054
1055 def start_worker(self):
1056 if self.worker:
1057 self.teardown_workers()
1058 self.teardown = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001059 for mc in self.rqdata.dataCaches:
1060 self.worker[mc] = self._start_worker(mc)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001061
1062 def start_fakeworker(self, rqexec):
1063 if not self.fakeworker:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001064 for mc in self.rqdata.dataCaches:
1065 self.fakeworker[mc] = self._start_worker(mc, True, rqexec)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001066
1067 def teardown_workers(self):
1068 self.teardown = True
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001069 for mc in self.worker:
1070 self._teardown_worker(self.worker[mc])
1071 self.worker = {}
1072 for mc in self.fakeworker:
1073 self._teardown_worker(self.fakeworker[mc])
1074 self.fakeworker = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001075
1076 def read_workers(self):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001077 for mc in self.worker:
1078 self.worker[mc].pipe.read()
1079 for mc in self.fakeworker:
1080 self.fakeworker[mc].pipe.read()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001081
1082 def active_fds(self):
1083 fds = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001084 for mc in self.worker:
1085 fds.append(self.worker[mc].pipe.input)
1086 for mc in self.fakeworker:
1087 fds.append(self.fakeworker[mc].pipe.input)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001088 return fds
1089
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001090 def check_stamp_task(self, tid, taskname = None, recurse = False, cache = None):
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001091 def get_timestamp(f):
1092 try:
1093 if not os.access(f, os.F_OK):
1094 return None
1095 return os.stat(f)[stat.ST_MTIME]
1096 except:
1097 return None
1098
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001099 (mc, fn, tn, taskfn) = split_tid_mcfn(tid)
1100 if taskname is None:
1101 taskname = tn
1102
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001103 if self.stamppolicy == "perfile":
1104 fulldeptree = False
1105 else:
1106 fulldeptree = True
1107 stampwhitelist = []
1108 if self.stamppolicy == "whitelist":
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001109 stampwhitelist = self.rqdata.stampfnwhitelist[mc]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001110
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001111 stampfile = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001112
1113 # If the stamp is missing, it's not current
1114 if not os.access(stampfile, os.F_OK):
1115 logger.debug(2, "Stampfile %s not available", stampfile)
1116 return False
1117 # If it's a 'nostamp' task, it's not current
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001118 taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001119 if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
1120 logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
1121 return False
1122
1123 if taskname != "do_setscene" and taskname.endswith("_setscene"):
1124 return True
1125
1126 if cache is None:
1127 cache = {}
1128
1129 iscurrent = True
1130 t1 = get_timestamp(stampfile)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001131 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001132 if iscurrent:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001133 (mc2, fn2, taskname2, taskfn2) = split_tid_mcfn(dep)
1134 stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCaches[mc2], taskfn2)
1135 stampfile3 = bb.build.stampfile(taskname2 + "_setscene", self.rqdata.dataCaches[mc2], taskfn2)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001136 t2 = get_timestamp(stampfile2)
1137 t3 = get_timestamp(stampfile3)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001138 if t3 and not t2:
1139 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001140 if t3 and t3 > t2:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001141 continue
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001142 if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
1143 if not t2:
1144 logger.debug(2, 'Stampfile %s does not exist', stampfile2)
1145 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001146 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001147 if t1 < t2:
1148 logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
1149 iscurrent = False
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001150 break
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001151 if recurse and iscurrent:
1152 if dep in cache:
1153 iscurrent = cache[dep]
1154 if not iscurrent:
1155 logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
1156 else:
1157 iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
1158 cache[dep] = iscurrent
1159 if recurse:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001160 cache[tid] = iscurrent
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001161 return iscurrent
1162
    def _execute_runqueue(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        This is one iteration of a state machine driven by self.state; the
        caller (execute_runqueue) invokes it repeatedly until it returns
        False (all done) or raises.
        """

        retval = True

        if self.state is runQueuePrepare:
            self.rqexe = RunQueueExecuteDummy(self)
            # NOTE: if you add, remove or significantly refactor the stages of this
            # process then you should recalculate the weightings here. This is quite
            # easy to do - just change the next line temporarily to pass debug=True as
            # the last parameter and you'll get a printout of the weightings as well
            # as a map to the lines where next_stage() was called. Of course this isn't
            # critical, but it helps to keep the progress reporting accurate.
            self.rqdata.init_progress_reporter = bb.progress.MultiStageProcessProgressReporter(self.cooker.data,
                                                            "Initialising tasks",
                                                            [43, 967, 4, 3, 1, 5, 3, 7, 13, 1, 2, 1, 1, 246, 35, 1, 38, 1, 35, 2, 338, 204, 142, 3, 3, 37, 244])
            # prepare() returning 0 means there are no tasks to run.
            if self.rqdata.prepare() == 0:
                self.state = runQueueComplete
            else:
                self.state = runQueueSceneInit
                self.rqdata.init_progress_reporter.next_stage()

                # we are ready to run, emit dependency info to any UI or class which
                # needs it
                depgraph = self.cooker.buildDependTree(self, self.rqdata.taskData)
                self.rqdata.init_progress_reporter.next_stage()
                bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)

        if self.state is runQueueSceneInit:
            # Signature-dump modes short-circuit the build entirely.
            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete
            else:
                self.rqdata.init_progress_reporter.next_stage()
                self.start_worker()
                self.rqdata.init_progress_reporter.next_stage()
                self.rqexe = RunQueueExecuteScenequeue(self)

        # Disk space monitoring only while tasks are actually executing.
        if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp]:
            self.dm.check(self)

        if self.state is runQueueSceneRun:
            retval = self.rqexe.execute()

        if self.state is runQueueRunInit:
            if self.cooker.configuration.setsceneonly:
                self.state = runQueueComplete
            else:
                # Just in case we didn't setscene
                self.rqdata.init_progress_reporter.finish()
                logger.info("Executing RunQueue Tasks")
                self.rqexe = RunQueueExecuteTasks(self)
                self.state = runQueueRunning

        if self.state is runQueueRunning:
            retval = self.rqexe.execute()

        if self.state is runQueueCleanUp:
            retval = self.rqexe.finish()

        if (self.state is runQueueComplete or self.state is runQueueFailed) and self.rqexe:
            self.teardown_workers()
            if self.rqexe.stats.failed:
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed.", self.rqexe.stats.completed + self.rqexe.stats.failed, self.rqexe.stats.skipped, self.rqexe.stats.failed)
            else:
                # Let's avoid the word "failed" if nothing actually did
                logger.info("Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and all succeeded.", self.rqexe.stats.completed, self.rqexe.stats.skipped)

        if self.state is runQueueFailed:
            # Without tryaltconfigs the failure is final; otherwise mark the
            # failed recipes and reset the queue to retry with alternates.
            if not self.rqdata.taskData[''].tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.rqexe.failed_tids)
            for tid in self.rqexe.failed_tids:
                (mc, fn, tn, _) = split_tid_mcfn(tid)
                self.rqdata.taskData[mc].fail_fn(fn)
            self.rqdata.reset()

        if self.state is runQueueComplete:
            # All done
            return False

        # Loop
        return retval
1255
1256 def execute_runqueue(self):
1257 # Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
1258 try:
1259 return self._execute_runqueue()
1260 except bb.runqueue.TaskFailure:
1261 raise
1262 except SystemExit:
1263 raise
1264 except bb.BBHandledException:
1265 try:
1266 self.teardown_workers()
1267 except:
1268 pass
1269 self.state = runQueueComplete
1270 raise
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001271 except Exception as err:
1272 logger.exception("An uncaught exception occurred in runqueue")
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001273 try:
1274 self.teardown_workers()
1275 except:
1276 pass
1277 self.state = runQueueComplete
1278 raise
1279
1280 def finish_runqueue(self, now = False):
1281 if not self.rqexe:
1282 self.state = runQueueComplete
1283 return
1284
1285 if now:
1286 self.rqexe.finish_now()
1287 else:
1288 self.rqexe.finish()
1289
1290 def dump_signatures(self, options):
1291 done = set()
1292 bb.note("Reparsing files to collect dependency data")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001293 bb_cache = bb.cache.NoCache(self.cooker.databuilder)
1294 for tid in self.rqdata.runtaskentries:
1295 fn = fn_from_tid(tid)
1296 if fn not in done:
1297 the_data = bb_cache.loadDataFull(fn, self.cooker.collection.get_file_appends(fn))
1298 done.add(fn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001299
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001300 bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001301
1302 return
1303
    def print_diffscenetasks(self):
        """
        Work out which tasks differ from any cached (setscene) result,
        report the "root" invalid tasks - those whose dependencies are all
        valid - and return the set of those root invalid task ids.
        """

        valid = []
        sq_hash = []
        sq_hashfn = []
        sq_fn = []
        sq_taskname = []
        sq_task = []
        noexec = []
        stamppresent = []
        valid_new = set()

        # Build parallel lists describing every executable task; noexec
        # tasks are collected separately and never hash-checked.
        for tid in self.rqdata.runtaskentries:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]

            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                noexec.append(tid)
                continue

            sq_fn.append(fn)
            sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
            sq_hash.append(self.rqdata.runtaskentries[tid].hash)
            sq_taskname.append(taskname)
            sq_task.append(tid)
        # Call the configured BB_HASHCHECK_FUNCTION; it returns indices into
        # the parallel lists for tasks whose cached results are still valid.
        locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
        try:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=True)"
            valid = bb.utils.better_eval(call, locs)
        # Handle version with no siginfo parameter
        except TypeError:
            call = self.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
            valid = bb.utils.better_eval(call, locs)
        for v in valid:
            valid_new.add(sq_task[v])

        # Tasks which are both setscene and noexec never care about dependencies
        # We therefore find tasks which are setscene and noexec and mark their
        # unique dependencies as valid.
        for tid in noexec:
            if tid not in self.rqdata.runq_setscene_tids:
                continue
            for dep in self.rqdata.runtaskentries[tid].depends:
                hasnoexecparents = True
                for dep2 in self.rqdata.runtaskentries[dep].revdeps:
                    if dep2 in self.rqdata.runq_setscene_tids and dep2 in noexec:
                        continue
                    hasnoexecparents = False
                    break
                if hasnoexecparents:
                    valid_new.add(dep)

        # Everything neither validated by the hash check nor noexec is stale.
        invalidtasks = set()
        for tid in self.rqdata.runtaskentries:
            if tid not in valid_new and tid not in noexec:
                invalidtasks.add(tid)

        # Walk each invalid task's dependency chain; if any dependency is
        # itself invalid, the task isn't a root cause and goes into 'found'.
        found = set()
        processed = set()
        for tid in invalidtasks:
            toprocess = set([tid])
            while toprocess:
                next = set()
                for t in toprocess:
                    for dep in self.rqdata.runtaskentries[t].depends:
                        if dep in invalidtasks:
                            found.add(tid)
                        if dep not in processed:
                            processed.add(dep)
                            next.add(dep)
                toprocess = next
                # Once tid is known non-root, stop walking its deps.
                if tid in found:
                    toprocess = set()

        tasklist = []
        for tid in invalidtasks.difference(found):
            tasklist.append(tid)

        if tasklist:
            bb.plain("The differences between the current build and any cached tasks start at the following tasks:\n" + "\n".join(tasklist))

        return invalidtasks.difference(found)
1386
    def write_diffscenetasks(self, invalidtasks):
        """Explain why each task in *invalidtasks* could not be reused.

        For every tid, find the siginfo file written for the hash we need,
        locate the most recent previously-written siginfo for the same
        (pn, taskname), and print a human-readable signature diff showing
        what changed between the two.
        """

        # Define recursion callback
        # Passed to compare_sigfiles so that hash differences in dependent
        # tasks are expanded inline (indented one level per recursion).
        def recursecb(key, hash1, hash2):
            hashes = [hash1, hash2]
            # NOTE(review): assumes find_siginfo returns a mapping from hash
            # to sigdata file path here - confirm against bb.siggen.
            hashfiles = bb.siggen.find_siginfo(key, None, hashes, self.cfgData)

            recout = []
            if len(hashfiles) == 2:
                # Both signature files found: diff them recursively.
                out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
                recout.extend(list(' ' + l for l in out2))
            else:
                # One (or both) siginfo files missing; report instead of diffing.
                recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

            return recout


        for tid in invalidtasks:
            (mc, fn, taskname, _) = split_tid_mcfn(tid)
            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
            h = self.rqdata.runtaskentries[tid].hash
            # With an empty hash list, find_siginfo returns every siginfo
            # written for this recipe/task regardless of hash.
            matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
            match = None
            # Pick the entry whose name embeds the hash we were expecting.
            for m in matches:
                if h in m:
                    match = m
            if match is None:
                bb.fatal("Can't find a task we're supposed to have written out? (hash: %s)?" % h)
            # Drop the current hash; what remains are candidate "previous"
            # signatures to compare against.
            # NOTE(review): assumes matches is a dict keyed by filename with
            # sortable values (presumably mtimes) - verify in bb.siggen.
            matches = {k : v for k, v in iter(matches.items()) if h not in k}
            if matches:
                # Newest remaining siginfo is treated as the closest match.
                latestmatch = sorted(matches.keys(), key=lambda f: matches[f])[-1]
                # Extract the 32-hex-digit hash embedded in the filename.
                prevh = __find_md5__.search(latestmatch).group(0)
                output = bb.siggen.compare_sigfiles(latestmatch, match, recursecb)
                bb.plain("\nTask %s:%s couldn't be used from the cache because:\n We need hash %s, closest matching task was %s\n " % (pn, taskname, h, prevh) + '\n '.join(output))
1421
class RunQueueExecute:
    """Base class for runqueue execution engines.

    Holds the state shared by the concrete executors (buildable/running/
    complete task sets, failure list, stamp cache), wires the worker pipes
    back to this executor, and provides the common finish/teardown logic.
    Subclasses are expected to provide self.stats and the task_* callbacks
    used by runqueue_process_waitpid.
    """

    def __init__(self, rq):
        self.rq = rq
        self.cooker = rq.cooker
        self.cfgData = rq.cfgData
        self.rqdata = rq.rqdata

        # Parallelism limit and scheduler selection from configuration.
        self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS", True) or 1)
        self.scheduler = self.cfgData.getVar("BB_SCHEDULER", True) or "speed"

        # Task ids (tids) by state: ready to run / dispatched / finished.
        self.runq_buildable = set()
        self.runq_running = set()
        self.runq_complete = set()

        # Stamp files for in-flight tasks: keyed by tid, plus a flat list
        # (build_stamps2) used for membership bookkeeping on completion.
        self.build_stamps = {}
        self.build_stamps2 = []
        self.failed_tids = []

        # Cache for check_stamp_task() results.
        self.stampcache = {}

        # Route worker pipe events back to this executor instance.
        for mc in rq.worker:
            rq.worker[mc].pipe.setrunqueueexec(self)
        for mc in rq.fakeworker:
            rq.fakeworker[mc].pipe.setrunqueueexec(self)

        if self.number_tasks <= 0:
            bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)

    def runqueue_process_waitpid(self, task, status):
        """Handle a worker's exit notification for *task*.

        A nonzero *status* marks the task failed, otherwise complete.
        Always returns True.
        """
        # self.build_stamps[pid] may not exist when use shared work directory.
        if task in self.build_stamps:
            self.build_stamps2.remove(self.build_stamps[task])
            del self.build_stamps[task]

        if status != 0:
            self.task_fail(task, status)
        else:
            self.task_complete(task)
        return True

    def finish_now(self):
        """Tell all workers to stop immediately and set the final state.

        Write errors are ignored since a worker that already died cannot
        be (and does not need to be) told to finish.
        """
        for mc in self.rq.worker:
            try:
                self.rq.worker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.worker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass
        for mc in self.rq.fakeworker:
            try:
                self.rq.fakeworker[mc].process.stdin.write(b"<finishnow></finishnow>")
                self.rq.fakeworker[mc].process.stdin.flush()
            except IOError:
                # worker must have died?
                pass

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return

        self.rq.state = runQueueComplete
        return

    def finish(self):
        """Gracefully wind down: wait for active tasks, then finalise state.

        Returns the active fds while tasks are still running (so the caller
        keeps polling), or True once a terminal state has been set.
        """
        self.rq.state = runQueueCleanUp

        # self.stats is provided by the concrete subclass.
        if self.stats.active > 0:
            bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData)
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        self.rq.state = runQueueComplete
        return True

    def check_dependencies(self, task, taskdeps, setscene = False):
        """Evaluate the metadata's BB_SETSCENE_DEPVALID-style hook for *task*.

        Builds a {tid: [pn, taskname, fn]} mapping for the task and its
        dependencies and passes it to the configured depvalidate function.
        Returns False when no hook is configured, otherwise the hook's result.

        NOTE(review): this mutates the caller's *taskdeps* set by adding
        *task* to it - looks accidental; confirm no caller relies on it.
        """
        if not self.rq.depvalidate:
            return False

        taskdata = {}
        taskdeps.add(task)
        for dep in taskdeps:
            (mc, fn, taskname, _) = split_tid_mcfn(dep)
            pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
            taskdata[dep] = [pn, taskname, fn]
        call = self.rq.depvalidate + "(task, taskdata, notneeded, d)"
        locs = { "task" : task, "taskdata" : taskdata, "notneeded" : self.scenequeue_notneeded, "d" : self.cooker.expanded_data }
        valid = bb.utils.better_eval(call, locs)
        return valid
1516
class RunQueueExecuteDummy(RunQueueExecute):
    """No-op executor used when there is nothing to execute.

    Deliberately does not call RunQueueExecute.__init__, so no worker
    pipes are touched; it only records the runqueue and an empty stats
    object, and finish() moves straight to the complete state.
    """

    def __init__(self, rq):
        # Zero-task stats: nothing will ever be active here.
        self.stats = RunQueueStats(0)
        self.rq = rq

    def finish(self):
        """Mark the runqueue complete immediately."""
        self.rq.state = runQueueComplete
1525
class RunQueueExecuteTasks(RunQueueExecute):
    """Executor for the main (non-setscene) task queue.

    On construction it works out which tasks are already covered by the
    setscene run (and therefore skippable), lets the metadata veto that
    coverage, fires stamp-update events and selects a scheduler.  The
    execute() method is then called repeatedly by the runqueue state
    machine to dispatch one task per call to a worker process.
    """

    def __init__(self, rq):
        RunQueueExecute.__init__(self, rq)

        self.stats = RunQueueStats(len(self.rqdata.runtaskentries))

        self.stampcache = {}

        # Snapshot of the setscene coverage before we extend it below;
        # used later so the veto walk never removes originally-covered tasks.
        initial_covered = self.rq.scenequeue_covered.copy()

        # Mark initial buildable tasks
        for tid in self.rqdata.runtaskentries:
            if len(self.rqdata.runtaskentries[tid].depends) == 0:
                self.runq_buildable.add(tid)
            # A task whose reverse dependencies are all covered is itself covered.
            if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                self.rq.scenequeue_covered.add(tid)

        # Propagate coverage to a fixed point: keep adding tasks whose
        # revdeps have all become covered by earlier iterations.
        found = True
        while found:
            found = False
            for tid in self.rqdata.runtaskentries:
                if tid in self.rq.scenequeue_covered:
                    continue
                logger.debug(1, 'Considering %s: %s' % (tid, str(self.rqdata.runtaskentries[tid].revdeps)))

                if len(self.rqdata.runtaskentries[tid].revdeps) > 0 and self.rqdata.runtaskentries[tid].revdeps.issubset(self.rq.scenequeue_covered):
                    if tid in self.rq.scenequeue_notcovered:
                        continue
                    found = True
                    self.rq.scenequeue_covered.add(tid)

        logger.debug(1, 'Skip list (pre setsceneverify) %s', sorted(self.rq.scenequeue_covered))

        # Allow the metadata to elect for setscene tasks to run anyway
        covered_remove = set()
        if self.rq.setsceneverify:
            invalidtasks = []
            tasknames = {}
            fns = {}
            for tid in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                fns[tid] = taskfn
                tasknames[tid] = taskname
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    continue
                if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
                    logger.debug(2, 'Setscene stamp current for task %s', tid)
                    continue
                if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
                    logger.debug(2, 'Normal stamp current for task %s', tid)
                    continue
                invalidtasks.append(tid)

            # Hand the coverage decision to the configured verify function;
            # it returns the tasks whose coverage must be revoked.
            call = self.rq.setsceneverify + "(covered, tasknames, fns, d, invalidtasks=invalidtasks)"
            locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : tasknames, "fns" : fns, "d" : self.cooker.expanded_data, "invalidtasks" : invalidtasks }
            covered_remove = bb.utils.better_eval(call, locs)

        # Revoke coverage for one task: delete its setscene stamp and drop
        # it from the covered set.
        def removecoveredtask(tid):
            (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
            taskname = taskname + '_setscene'
            bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
            self.rq.scenequeue_covered.remove(tid)

        toremove = covered_remove
        for task in toremove:
            logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
        # Revocation cascades upward: any covered dependency of a revoked
        # task (unless it was covered from the start) is revoked too.
        while toremove:
            covered_remove = []
            for task in toremove:
                removecoveredtask(task)
                for deptask in self.rqdata.runtaskentries[task].depends:
                    if deptask not in self.rq.scenequeue_covered:
                        continue
                    if deptask in toremove or deptask in covered_remove or deptask in initial_covered:
                        continue
                    logger.debug(1, 'Task %s depends on task %s so not skipping' % (task, deptask))
                    covered_remove.append(deptask)
            toremove = covered_remove

        logger.debug(1, 'Full skip list %s', self.rq.scenequeue_covered)


        # Notify listeners of the stamps associated with the build targets,
        # per multiconfig.
        for mc in self.rqdata.dataCaches:
            target_pairs = []
            for tid in self.rqdata.target_tids:
                (tidmc, fn, taskname, _) = split_tid_mcfn(tid)
                if tidmc == mc:
                    target_pairs.append((fn, taskname))

            event.fire(bb.event.StampUpdate(target_pairs, self.rqdata.dataCaches[mc].stamp), self.cfgData)

        # Instantiate the scheduler selected by BB_SCHEDULER.
        schedulers = self.get_schedulers()
        for scheduler in schedulers:
            if self.scheduler == scheduler.name:
                self.sched = scheduler(self, self.rqdata)
                logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
                break
        else:
            bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
                     (self.scheduler, ", ".join(obj.name for obj in schedulers)))

    def get_schedulers(self):
        """Return the set of available RunQueueScheduler subclasses.

        Starts from every subclass visible in this module's globals and
        adds any classes named (as "module.Class") in BB_SCHEDULERS.

        Raises SystemExit if a named scheduler module cannot be imported.
        """
        schedulers = set(obj for obj in globals().values()
                         if type(obj) is type and
                            issubclass(obj, RunQueueScheduler))

        user_schedulers = self.cfgData.getVar("BB_SCHEDULERS", True)
        if user_schedulers:
            for sched in user_schedulers.split():
                if not "." in sched:
                    bb.note("Ignoring scheduler '%s' from BB_SCHEDULERS: not an import" % sched)
                    continue

                modname, name = sched.rsplit(".", 1)
                try:
                    module = __import__(modname, fromlist=(name,))
                except ImportError as exc:
                    logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
                    raise SystemExit(1)
                else:
                    schedulers.add(getattr(module, name))
        return schedulers

    def setbuildable(self, task):
        """Mark *task* buildable and notify the scheduler."""
        self.runq_buildable.add(task)
        # NOTE: 'newbuilable' (sic) matches the scheduler API's spelling
        # elsewhere in this file; do not "fix" the name on one side only.
        self.sched.newbuilable(task)

    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """
        self.runq_complete.add(task)
        for revdep in self.rqdata.runtaskentries[task].revdeps:
            if revdep in self.runq_running:
                continue
            if revdep in self.runq_buildable:
                continue
            alldeps = 1
            for dep in self.rqdata.runtaskentries[revdep].depends:
                if dep not in self.runq_complete:
                    alldeps = 0
            if alldeps == 1:
                self.setbuildable(revdep)
                logger.debug(1, "Marking task %s as buildable", revdep)

    def task_complete(self, task):
        """Record a successful task and propagate buildability."""
        self.stats.taskCompleted()
        bb.event.fire(runQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
        self.task_completeoutright(task)

    def task_fail(self, task, exitcode):
        """
        Called when a task has failed
        Updates the state engine with the failure
        """
        self.stats.taskFailed()
        self.failed_tids.append(task)
        bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
        # Unless the user asked to continue past failures, start cleanup.
        if self.rqdata.taskData[''].abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task, reason):
        """Mark *task* as skipped for *reason* without running it."""
        self.runq_running.add(task)
        self.setbuildable(task)
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

    def execute(self):
        """
        Run the tasks in a queue prepared by rqdata.prepare()

        Dispatches at most one task per call; returns True to be called
        again, or the active fds to poll while workers are busy.
        """

        if self.rqdata.setscenewhitelist and not self.rqdata.setscenewhitelist_checked:
            self.rqdata.setscenewhitelist_checked = True

            # Check tasks that are going to run against the whitelist
            def check_norun_task(tid, showerror=False):
                (mc, fn, taskname, _) = split_tid_mcfn(tid)
                # Ignore covered tasks
                if tid in self.rq.scenequeue_covered:
                    return False
                # Ignore stamped tasks
                if self.rq.check_stamp_task(tid, taskname, cache=self.stampcache):
                    return False
                # Ignore noexec tasks
                taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
                if 'noexec' in taskdep and taskname in taskdep['noexec']:
                    return False

                pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
                if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
                    if showerror:
                        if tid in self.rqdata.runq_setscene_tids:
                            logger.error('Task %s.%s attempted to execute unexpectedly and should have been setscened' % (pn, taskname))
                        else:
                            logger.error('Task %s.%s attempted to execute unexpectedly' % (pn, taskname))
                    return True
                return False
            # Look to see if any tasks that we think shouldn't run are going to
            unexpected = False
            for tid in self.rqdata.runtaskentries:
                if check_norun_task(tid):
                    unexpected = True
                    break
            if unexpected:
                # Run through the tasks in the rough order they'd have executed and print errors
                # (since the order can be useful - usually missing sstate for the last few tasks
                # is the cause of the problem)
                task = self.sched.next()
                while task is not None:
                    check_norun_task(task, showerror=True)
                    self.task_skip(task, 'Setscene enforcement check')
                    task = self.sched.next()

                self.rq.state = runQueueCleanUp
                return True

        self.rq.read_workers()

        if self.stats.total == 0:
            # nothing to do
            self.rq.state = runQueueCleanUp

        task = self.sched.next()
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)

            if task in self.rq.scenequeue_covered:
                logger.debug(2, "Setscene covered task %s", task)
                self.task_skip(task, "covered")
                return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, "Stamp current task %s", task)

                self.task_skip(task, "existing")
                return True

            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'noexec' in taskdep and taskname in taskdep['noexec']:
                # noexec tasks are completed locally: just stamp and move on.
                startevent = runQueueTaskStarted(task, self.stats, self.rq,
                                                 noexec=True)
                bb.event.fire(startevent, self.cfgData)
                self.runq_running.add(task)
                self.stats.taskActive()
                if not self.cooker.configuration.dry_run:
                    bb.build.make_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
                self.task_complete(task)
                return True
            else:
                startevent = runQueueTaskStarted(task, self.stats, self.rq)
                bb.event.fire(startevent, self.cfgData)

                taskdepdata = self.build_taskdepdata(task)

                taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
                if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                    # fakeroot tasks go to the (lazily started) fakeworker.
                    if not self.rq.fakeworker:
                        try:
                            self.rq.start_fakeworker(self)
                        except OSError as exc:
                            logger.critical("Failed to spawn fakeroot worker to run %s: %s" % (task, str(exc)))
                            self.rq.state = runQueueFailed
                            self.stats.taskFailed()
                            return True
                    # Fixed: use taskfn (multiconfig-qualified) for the file
                    # appends lookup, matching the normal worker branch below.
                    self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
                    self.rq.fakeworker[mc].process.stdin.flush()
                else:
                    self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, False, self.cooker.collection.get_file_appends(taskfn), taskdepdata)) + b"</runtask>")
                    self.rq.worker[mc].process.stdin.flush()

                self.build_stamps[task] = bb.build.stampfile(taskname, self.rqdata.dataCaches[mc], taskfn, noextra=True)
                self.build_stamps2.append(self.build_stamps[task])
                self.runq_running.add(task)
                self.stats.taskActive()
                if self.stats.active < self.number_tasks:
                    return True

        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        if len(self.failed_tids) != 0:
            self.rq.state = runQueueFailed
            return True

        # Sanity Checks
        for task in self.rqdata.runtaskentries:
            if task not in self.runq_buildable:
                logger.error("Task %s never buildable!", task)
            if task not in self.runq_running:
                logger.error("Task %s never ran!", task)
            if task not in self.runq_complete:
                logger.error("Task %s never completed!", task)
        self.rq.state = runQueueComplete

        return True

    def build_taskdepdata(self, task):
        """Build the taskdepdata mapping handed to the worker for *task*.

        Returns {tid: [pn, taskname, fn, depends-set, provides]} covering
        *task* and its transitive dependencies.
        """
        taskdepdata = {}
        # Fixed: copy the depends set before mutating it.  The original code
        # aliased self.rqdata.runtaskentries[task].depends and then .add()ed
        # the task to it, permanently making the task depend on itself in
        # the shared rqdata structures.
        next = set(self.rqdata.runtaskentries[task].depends)
        next.add(task)
        while next:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = self.rqdata.runtaskentries[revdep].depends
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional

        #bb.note("Task %s: " % task + str(taskdepdata).replace("], ", "],\n"))
        return taskdepdata
1850
1851class RunQueueExecuteScenequeue(RunQueueExecute):
1852 def __init__(self, rq):
1853 RunQueueExecute.__init__(self, rq)
1854
1855 self.scenequeue_covered = set()
1856 self.scenequeue_notcovered = set()
1857 self.scenequeue_notneeded = set()
1858
1859 # If we don't have any setscene functions, skip this step
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001860 if len(self.rqdata.runq_setscene_tids) == 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001861 rq.scenequeue_covered = set()
1862 rq.state = runQueueRunInit
1863 return
1864
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001865 self.stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001866
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001867 sq_revdeps = {}
1868 sq_revdeps_new = {}
1869 sq_revdeps_squash = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001870 self.sq_harddeps = {}
1871
1872 # We need to construct a dependency graph for the setscene functions. Intermediate
1873 # dependencies between the setscene tasks only complicate the code. This code
1874 # therefore aims to collapse the huge runqueue dependency tree into a smaller one
1875 # only containing the setscene functions.
1876
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001877 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001878
1879 # First process the chains up to the first setscene task.
1880 endpoints = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001881 for tid in self.rqdata.runtaskentries:
1882 sq_revdeps[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
1883 sq_revdeps_new[tid] = set()
1884 if (len(sq_revdeps[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
1885 #bb.warn("Added endpoint %s" % (tid))
1886 endpoints[tid] = set()
1887
1888 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001889
1890 # Secondly process the chains between setscene tasks.
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001891 for tid in self.rqdata.runq_setscene_tids:
1892 #bb.warn("Added endpoint 2 %s" % (tid))
1893 for dep in self.rqdata.runtaskentries[tid].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001894 if dep not in endpoints:
1895 endpoints[dep] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001896 #bb.warn(" Added endpoint 3 %s" % (dep))
1897 endpoints[dep].add(tid)
1898
1899 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001900
1901 def process_endpoints(endpoints):
1902 newendpoints = {}
1903 for point, task in endpoints.items():
1904 tasks = set()
1905 if task:
1906 tasks |= task
1907 if sq_revdeps_new[point]:
1908 tasks |= sq_revdeps_new[point]
1909 sq_revdeps_new[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001910 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001911 sq_revdeps_new[point] = tasks
Patrick Williamsd8c66bc2016-06-20 12:57:21 -05001912 tasks = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001913 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001914 if point in sq_revdeps[dep]:
1915 sq_revdeps[dep].remove(point)
1916 if tasks:
1917 sq_revdeps_new[dep] |= tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001918 if (len(sq_revdeps[dep]) == 0 or len(sq_revdeps_new[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001919 newendpoints[dep] = task
1920 if len(newendpoints) != 0:
1921 process_endpoints(newendpoints)
1922
1923 process_endpoints(endpoints)
1924
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001925 self.rqdata.init_progress_reporter.next_stage()
1926
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001927 # Build a list of setscene tasks which are "unskippable"
1928 # These are direct endpoints referenced by the build
1929 endpoints2 = {}
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001930 sq_revdeps2 = {}
1931 sq_revdeps_new2 = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001932 def process_endpoints2(endpoints):
1933 newendpoints = {}
1934 for point, task in endpoints.items():
1935 tasks = set([point])
1936 if task:
1937 tasks |= task
1938 if sq_revdeps_new2[point]:
1939 tasks |= sq_revdeps_new2[point]
1940 sq_revdeps_new2[point] = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001941 if point in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001942 sq_revdeps_new2[point] = tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001943 for dep in self.rqdata.runtaskentries[point].depends:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001944 if point in sq_revdeps2[dep]:
1945 sq_revdeps2[dep].remove(point)
1946 if tasks:
1947 sq_revdeps_new2[dep] |= tasks
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001948 if (len(sq_revdeps2[dep]) == 0 or len(sq_revdeps_new2[dep]) != 0) and dep not in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001949 newendpoints[dep] = tasks
1950 if len(newendpoints) != 0:
1951 process_endpoints2(newendpoints)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001952 for tid in self.rqdata.runtaskentries:
1953 sq_revdeps2[tid] = copy.copy(self.rqdata.runtaskentries[tid].revdeps)
1954 sq_revdeps_new2[tid] = set()
1955 if (len(sq_revdeps2[tid]) == 0) and tid not in self.rqdata.runq_setscene_tids:
1956 endpoints2[tid] = set()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001957 process_endpoints2(endpoints2)
1958 self.unskippable = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001959 for tid in self.rqdata.runq_setscene_tids:
1960 if sq_revdeps_new2[tid]:
1961 self.unskippable.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001962
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001963 self.rqdata.init_progress_reporter.next_stage(len(self.rqdata.runtaskentries))
1964
1965 for taskcounter, tid in enumerate(self.rqdata.runtaskentries):
1966 if tid in self.rqdata.runq_setscene_tids:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001967 deps = set()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001968 for dep in sq_revdeps_new[tid]:
1969 deps.add(dep)
1970 sq_revdeps_squash[tid] = deps
1971 elif len(sq_revdeps_new[tid]) != 0:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001972 bb.msg.fatal("RunQueue", "Something went badly wrong during scenequeue generation, aborting. Please report this problem.")
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001973 self.rqdata.init_progress_reporter.update(taskcounter)
1974
1975 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001976
1977 # Resolve setscene inter-task dependencies
1978 # e.g. do_sometask_setscene[depends] = "targetname:do_someothertask_setscene"
1979 # Note that anything explicitly depended upon will have its reverse dependencies removed to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001980 for tid in self.rqdata.runq_setscene_tids:
1981 (mc, fn, taskname, _) = split_tid_mcfn(tid)
1982 realtid = fn + ":" + taskname + "_setscene"
1983 idepends = self.rqdata.taskData[mc].taskentries[realtid].idepends
1984 for (depname, idependtask) in idepends:
1985
1986 if depname not in self.rqdata.taskData[mc].build_targets:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001987 continue
1988
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001989 depfn = self.rqdata.taskData[mc].build_targets[depname][0]
1990 if depfn is None:
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001991 continue
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001992 deptid = depfn + ":" + idependtask.replace("_setscene", "")
1993 if deptid not in self.rqdata.runtaskentries:
1994 bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (realtid, depfn, idependtask))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001995
Patrick Williamsc0f7c042017-02-23 20:41:17 -06001996 if not deptid in self.sq_harddeps:
1997 self.sq_harddeps[deptid] = set()
1998 self.sq_harddeps[deptid].add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05001999
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002000 sq_revdeps_squash[tid].add(deptid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002001 # Have to zero this to avoid circular dependencies
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002002 sq_revdeps_squash[deptid] = set()
2003
2004 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002005
2006 for task in self.sq_harddeps:
2007 for dep in self.sq_harddeps[task]:
2008 sq_revdeps_squash[dep].add(task)
2009
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002010 self.rqdata.init_progress_reporter.next_stage()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002011
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002012 #for tid in sq_revdeps_squash:
2013 # for dep in sq_revdeps_squash[tid]:
2014 # data = data + "\n %s" % dep
2015 # bb.warn("Task %s_setscene: is %s " % (tid, data
2016
2017 self.sq_deps = {}
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002018 self.sq_revdeps = sq_revdeps_squash
2019 self.sq_revdeps2 = copy.deepcopy(self.sq_revdeps)
2020
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002021 for tid in self.sq_revdeps:
2022 self.sq_deps[tid] = set()
2023 for tid in self.sq_revdeps:
2024 for dep in self.sq_revdeps[tid]:
2025 self.sq_deps[dep].add(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002026
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002027 self.rqdata.init_progress_reporter.next_stage()
2028
2029 for tid in self.sq_revdeps:
2030 if len(self.sq_revdeps[tid]) == 0:
2031 self.runq_buildable.add(tid)
2032
2033 self.rqdata.init_progress_reporter.finish()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002034
2035 self.outrightfail = []
2036 if self.rq.hashvalidate:
2037 sq_hash = []
2038 sq_hashfn = []
2039 sq_fn = []
2040 sq_taskname = []
2041 sq_task = []
2042 noexec = []
2043 stamppresent = []
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002044 for tid in self.sq_revdeps:
2045 (mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
2046
2047 taskdep = self.rqdata.dataCaches[mc].task_deps[fn]
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002048
2049 if 'noexec' in taskdep and taskname in taskdep['noexec']:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002050 noexec.append(tid)
2051 self.task_skip(tid)
2052 bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCaches[mc], taskfn)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002053 continue
2054
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002055 if self.rq.check_stamp_task(tid, taskname + "_setscene", cache=self.stampcache):
2056 logger.debug(2, 'Setscene stamp current for task %s', tid)
2057 stamppresent.append(tid)
2058 self.task_skip(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002059 continue
2060
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002061 if self.rq.check_stamp_task(tid, taskname, recurse = True, cache=self.stampcache):
2062 logger.debug(2, 'Normal stamp current for task %s', tid)
2063 stamppresent.append(tid)
2064 self.task_skip(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002065 continue
2066
2067 sq_fn.append(fn)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002068 sq_hashfn.append(self.rqdata.dataCaches[mc].hashfn[fn])
2069 sq_hash.append(self.rqdata.runtaskentries[tid].hash)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002070 sq_taskname.append(taskname)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002071 sq_task.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002072 call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
2073 locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.expanded_data }
2074 valid = bb.utils.better_eval(call, locs)
2075
2076 valid_new = stamppresent
2077 for v in valid:
2078 valid_new.append(sq_task[v])
2079
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002080 for tid in self.sq_revdeps:
2081 if tid not in valid_new and tid not in noexec:
2082 logger.debug(2, 'No package found, so skipping setscene task %s', tid)
2083 self.outrightfail.append(tid)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002084
2085 logger.info('Executing SetScene Tasks')
2086
2087 self.rq.state = runQueueSceneRun
2088
2089 def scenequeue_updatecounters(self, task, fail = False):
2090 for dep in self.sq_deps[task]:
2091 if fail and task in self.sq_harddeps and dep in self.sq_harddeps[task]:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002092 logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002093 self.scenequeue_updatecounters(dep, fail)
2094 continue
2095 if task not in self.sq_revdeps2[dep]:
2096 # May already have been removed by the fail case above
2097 continue
2098 self.sq_revdeps2[dep].remove(task)
2099 if len(self.sq_revdeps2[dep]) == 0:
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002100 self.runq_buildable.add(dep)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002101
    def task_completeoutright(self, task):
        """
        Mark a task as completed
        Look at the reverse dependencies and mark any task with
        completed dependencies as buildable
        """

        logger.debug(1, 'Found task %s which could be accelerated', task)
        # Covered tasks are handed to the RunQueue at the end of execute() so
        # the corresponding real tasks can be skipped.
        self.scenequeue_covered.add(task)
        self.scenequeue_updatecounters(task)
2112
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002113 def check_taskfail(self, task):
2114 if self.rqdata.setscenewhitelist:
2115 realtask = task.split('_setscene')[0]
2116 (mc, fn, taskname, _) = split_tid_mcfn(realtask)
2117 pn = self.rqdata.dataCaches[mc].pkg_fn[fn]
2118 if not check_setscene_enforce_whitelist(pn, taskname, self.rqdata.setscenewhitelist):
2119 logger.error('Task %s.%s failed' % (pn, taskname + "_setscene"))
2120 self.rq.state = runQueueCleanUp
2121
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002122 def task_complete(self, task):
2123 self.stats.taskCompleted()
2124 bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
2125 self.task_completeoutright(task)
2126
2127 def task_fail(self, task, result):
2128 self.stats.taskFailed()
2129 bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
2130 self.scenequeue_notcovered.add(task)
2131 self.scenequeue_updatecounters(task, True)
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002132 self.check_taskfail(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002133
2134 def task_failoutright(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002135 self.runq_running.add(task)
2136 self.runq_buildable.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002137 self.stats.taskCompleted()
2138 self.stats.taskSkipped()
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002139 self.scenequeue_notcovered.add(task)
2140 self.scenequeue_updatecounters(task, True)
2141
2142 def task_skip(self, task):
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002143 self.runq_running.add(task)
2144 self.runq_buildable.add(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002145 self.task_completeoutright(task)
2146 self.stats.taskCompleted()
2147 self.stats.taskSkipped()
2148
    def execute(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue

        Called repeatedly by the server loop: each call dispatches at most
        one new setscene task to a worker, then reports whether work is
        still outstanding. Once nothing is active or dispatchable, hands the
        covered/notcovered sets to the RunQueue and advances its state.
        """

        self.rq.read_workers()

        task = None
        if self.stats.active < self.number_tasks:
            # Find the next setscene to run
            for nexttask in self.rqdata.runq_setscene_tids:
                if nexttask in self.runq_buildable and nexttask not in self.runq_running:
                    if nexttask in self.unskippable:
                        logger.debug(2, "Setscene task %s is unskippable" % nexttask)
                    # If every reverse dependency is already covered, running
                    # this setscene task is pointless unless it is an explicit
                    # build target - skip it instead.
                    if nexttask not in self.unskippable and len(self.sq_revdeps[nexttask]) > 0 and self.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sq_revdeps[nexttask], True):
                        fn = fn_from_tid(nexttask)
                        foundtarget = False

                        if nexttask in self.rqdata.target_tids:
                            foundtarget = True
                        if not foundtarget:
                            logger.debug(2, "Skipping setscene for task %s" % nexttask)
                            self.task_skip(nexttask)
                            self.scenequeue_notneeded.add(nexttask)
                            return True
                    # Tasks whose sstate availability check failed up front are
                    # failed immediately without being sent to a worker.
                    if nexttask in self.outrightfail:
                        self.task_failoutright(nexttask)
                        return True
                    task = nexttask
                    break
        if task is not None:
            (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
            taskname = taskname + "_setscene"
            # If the real (non-setscene) task already has a valid stamp, the
            # setscene variant is redundant; fail it outright so the normal
            # coverage logic takes over.
            if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
                logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
                self.task_failoutright(task)
                return True

            # In force mode, explicit targets must really re-run, so their
            # setscene acceleration is refused.
            if self.cooker.configuration.force:
                if task in self.rqdata.target_tids:
                    self.task_failoutright(task)
                    return True

            if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
                logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
                self.task_skip(task)
                return True

            startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
            bb.event.fire(startevent, self.cfgData)

            # Dispatch to the fakeroot worker when the task requires it (and
            # this is not a dry run), otherwise to the regular worker for
            # this multiconfig.
            taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
            if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not self.cooker.configuration.dry_run:
                if not self.rq.fakeworker:
                    self.rq.start_fakeworker(self)
                self.rq.fakeworker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
                self.rq.fakeworker[mc].process.stdin.flush()
            else:
                self.rq.worker[mc].process.stdin.write(b"<runtask>" + pickle.dumps((taskfn, task, taskname, True, self.cooker.collection.get_file_appends(taskfn), None)) + b"</runtask>")
                self.rq.worker[mc].process.stdin.flush()

            self.runq_running.add(task)
            self.stats.taskActive()
            if self.stats.active < self.number_tasks:
                return True

        # Nothing new could be dispatched; wait on the running workers.
        if self.stats.active > 0:
            self.rq.read_workers()
            return self.rq.active_fds()

        #for tid in self.sq_revdeps:
        #    if tid not in self.runq_running:
        #        buildable = tid in self.runq_buildable
        #        revdeps = self.sq_revdeps[tid]
        #        bb.warn("Found we didn't run %s %s %s" % (tid, buildable, str(revdeps)))

        # All setscene tasks processed: publish the covered/notcovered results
        # and move the runqueue on to executing the real tasks.
        self.rq.scenequeue_covered = self.scenequeue_covered
        self.rq.scenequeue_notcovered = self.scenequeue_notcovered

        logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))

        self.rq.state = runQueueRunInit

        completeevent = sceneQueueComplete(self.stats, self.rq)
        bb.event.fire(completeevent, self.cfgData)

        return True
2236
    def runqueue_process_waitpid(self, task, status):
        # Explicitly delegate to the shared RunQueueExecute implementation to
        # process the worker's exit status for 'task'.
        RunQueueExecute.runqueue_process_waitpid(self, task, status)
2239
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # Assigned straight onto BaseException.args rather than via
        # Exception.__init__. NOTE(review): assumes x is a sequence of
        # failure details - confirm against the raisers.
        self.args = x
2246
2247
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        # Number of tasks still active when the wait began, plus a
        # human-readable summary for the UI.
        self.remain = remain
        self.message = "Waiting for %s active tasks to finish" % remain
        super().__init__()
2257
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        # Identify the task by its raw tid and by its parsed components.
        self.taskid = task
        self.taskstring = task
        self.taskfile = fn_from_tid(task)
        self.taskname = taskname_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
        # Copy so later stats mutation doesn't alter this event's snapshot.
        self.stats = stats.copy()
        super().__init__()
2270
class sceneQueueEvent(runQueueEvent):
    """
    Base sceneQueue event class
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # Re-describe the task under its _setscene variant name.
        setscene_name = taskname_from_tid(task) + "_setscene"
        self.taskstring = task + "_setscene"
        self.taskname = setscene_name
        self.taskfile = fn_from_tid(task)
        self.taskhash = rq.rqdata.get_task_hash(task)
Patrick Williamsc124f4f2015-09-15 14:41:29 -05002281
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying a task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task is flagged noexec (nothing will actually run).
        self.noexec = noexec
2289
class sceneQueueTaskStarted(sceneQueueEvent):
    """
    Event notifying a setscene task was started
    """
    def __init__(self, task, stats, rq, noexec=False):
        super().__init__(task, stats, rq)
        # True when the task is flagged noexec (nothing will actually run).
        self.noexec = noexec
2297
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying a task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        # Exit status reported for the failed task.
        self.exitcode = exitcode
2305
class sceneQueueTaskFailed(sceneQueueEvent):
    """
    Event notifying a setscene task failed
    """
    def __init__(self, task, stats, exitcode, rq):
        super().__init__(task, stats, rq)
        # Exit status reported for the failed setscene task.
        self.exitcode = exitcode
2313
class sceneQueueComplete(sceneQueueEvent):
    """
    Event when all the sceneQueue tasks are complete
    """
    def __init__(self, stats, rq):
        # Deliberately bypasses sceneQueueEvent.__init__: there is no single
        # task to describe, only the final stats snapshot is carried.
        self.stats = stats.copy()
        bb.event.Event.__init__(self)
2321
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying a task completed

    Carries no payload beyond the base runQueueEvent fields.
    """
2326
class sceneQueueTaskCompleted(sceneQueueEvent):
    """
    Event notifying a setscene task completed

    Carries no payload beyond the base sceneQueueEvent fields.
    """
2331
class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifying a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        super().__init__(task, stats, rq)
        # Caller-supplied explanation of why the task was skipped.
        self.reason = reason
2339
class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
    """
    def __init__(self, pipein, pipeout, d, rq, rqexec):
        # Keep the read end; the write end belongs to the worker process.
        self.input = pipein
        if pipeout:
            pipeout.close()
        bb.utils.nonblockingfd(self.input)
        # Bytes received so far but not yet parsed into complete messages.
        self.queue = b""
        self.d = d
        self.rq = rq
        self.rqexec = rqexec

    def setrunqueueexec(self, rqexec):
        # Swap in a different executor to receive waitpid notifications.
        self.rqexec = rqexec

    def read(self):
        """
        Drain pending data from the worker pipe and dispatch any complete
        <event>...</event> and <exitcode>...</exitcode> messages.

        Returns True if new bytes were read, False otherwise.
        """
        # First check the worker processes themselves are still alive.
        for workers, name in [(self.rq.worker, "Worker"), (self.rq.fakeworker, "Fakeroot")]:
            for worker in workers.values():
                worker.process.poll()
                if worker.process.returncode is not None and not self.rq.teardown:
                    bb.error("%s process (%s) exited unexpectedly (%s), shutting down..." % (name, worker.process.pid, str(worker.process.returncode)))
                    self.rq.finish_runqueue(True)

        start = len(self.queue)
        try:
            # Non-blocking read; EAGAIN just means no data right now.
            self.queue = self.queue + (self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
        end = len(self.queue)
        found = True
        # Keep extracting messages until a pass finds nothing complete.
        while found and len(self.queue):
            found = False
            index = self.queue.find(b"</event>")
            while index != -1 and self.queue.startswith(b"<event>"):
                try:
                    # Payload lies between the 7-byte "<event>" opening tag
                    # and the closing tag at 'index'.
                    event = pickle.loads(self.queue[7:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index]))
                bb.event.fire_from_worker(event, self.d)
                found = True
                # Drop the consumed message: index + len("</event>").
                self.queue = self.queue[index+8:]
                index = self.queue.find(b"</event>")
            index = self.queue.find(b"</exitcode>")
            while index != -1 and self.queue.startswith(b"<exitcode>"):
                try:
                    # Payload lies between the 10-byte "<exitcode>" tag and
                    # the closing tag at 'index'.
                    task, status = pickle.loads(self.queue[10:index])
                except ValueError as e:
                    bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
                self.rqexec.runqueue_process_waitpid(task, status)
                found = True
                # Drop the consumed message: index + len("</exitcode>").
                self.queue = self.queue[index+11:]
                index = self.queue.find(b"</exitcode>")
        return (end > start)

    def close(self):
        # Drain every remaining complete message before closing the pipe.
        while self.read():
            continue
        if len(self.queue) > 0:
            print("Warning, worker left partial message: %s" % self.queue)
        self.input.close()
Patrick Williamsc0f7c042017-02-23 20:41:17 -06002403
def get_setscene_enforce_whitelist(d):
    """
    Return the BB_SETSCENE_ENFORCE_WHITELIST entries as a list of
    "pn:taskname" patterns, or None when BB_SETSCENE_ENFORCE is not '1'.

    Entries of the form "%:taskname" are expanded once per non-option
    command line argument, substituting the argument's recipe portion
    (text before any ':') for the '%'.
    """
    if d.getVar('BB_SETSCENE_ENFORCE', True) != '1':
        return None
    raw_entries = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST", True) or "").split()
    expanded = []
    for entry in raw_entries:
        if not entry.startswith('%:'):
            expanded.append(entry)
            continue
        taskpart = entry.split(':')[1]
        for target in sys.argv[1:]:
            if not target.startswith('-'):
                expanded.append(target.split(':')[0] + ':' + taskpart)
    return expanded
2417
def check_setscene_enforce_whitelist(pn, taskname, whitelist):
    """
    Return True when "pn:taskname" is permitted by the setscene enforcement
    whitelist, i.e. when no whitelist is configured (None or empty) or the
    task matches one of the fnmatch-style patterns in it.
    """
    import fnmatch
    if not whitelist:
        # No whitelist configured: everything is permitted.
        return True
    candidate = '%s:%s' % (pn, taskname)
    return any(fnmatch.fnmatch(candidate, pattern) for pattern in whitelist)