Brad Bishop | d7bf8c1 | 2018-02-25 22:55:05 -0500 | [diff] [blame] | 1 | # Copyright (C) 2017 Intel Corporation |
Brad Bishop | c342db3 | 2019-05-15 21:57:59 -0400 | [diff] [blame] | 2 | # |
| 3 | # SPDX-License-Identifier: MIT |
| 4 | # |
Brad Bishop | d7bf8c1 | 2018-02-25 22:55:05 -0500 | [diff] [blame] | 5 | |
| 6 | import unittest |
| 7 | |
| 8 | from checklayer import LayerType, get_signatures, check_command, get_depgraph |
| 9 | from checklayer.case import OECheckLayerTestCase |
| 10 | |
class BSPCheckLayer(OECheckLayerTestCase):
    """yocto-check-layer checks that apply only to layers of type BSP.

    The harness (checklayer) populates two attributes that all tests rely on:
      self.tc.layer -- metadata dict for the layer under test
                       ('type', 'name', 'conf' -> {'machines': ...})
      self.td       -- test data dict ('bbvars', 'machines', 'builddir')
    """

    @classmethod
    def setUpClass(cls):
        # Skip the entire class unless the layer under test really is a BSP
        # layer.  (First parameter renamed self -> cls: this is a classmethod.)
        if cls.tc.layer['type'] != LayerType.BSP:
            raise unittest.SkipTest("BSPCheckLayer: Layer %s isn't BSP one." %
                                    cls.tc.layer['name'])

    def test_bsp_defines_machines(self):
        # A BSP layer must provide at least one machine configuration.
        self.assertTrue(self.tc.layer['conf']['machines'],
                        "Layer is BSP but doesn't define machines.")

    def test_bsp_no_set_machine(self):
        from oeqa.utils.commands import get_bb_var

        # Merely adding the layer must not change the globally selected
        # MACHINE: compare the value recorded before the layer was added
        # (self.td['bbvars']) with the current one.
        machine = get_bb_var('MACHINE')
        self.assertEqual(self.td['bbvars']['MACHINE'], machine,
                         msg="Layer %s modified machine %s -> %s" %
                         (self.tc.layer['name'],
                          self.td['bbvars']['MACHINE'], machine))

    def test_machine_world(self):
        '''
        "bitbake world" is expected to work regardless which machine is selected.
        BSP layers sometimes break that by enabling a recipe for a certain machine
        without checking whether that recipe actually can be built in the current
        distro configuration (for example, OpenGL might not enabled).

        This test iterates over all machines. It would be nicer to instantiate
        it once per machine. It merely checks for errors during parse
        time. It does not actually attempt to build anything.
        '''

        if not self.td['machines']:
            self.skipTest('No machines set with --machines.')
        msg = []
        for machine in self.td['machines']:
            # In contrast to test_machine_signatures() below, errors are fatal here.
            try:
                get_signatures(self.td['builddir'], failsafe=False, machine=machine)
            except RuntimeError as ex:
                msg.append(str(ex))
        if msg:
            msg.insert(0, 'The following machines broke a world build:')
            self.fail('\n'.join(msg))

    def test_machine_signatures(self):
        '''
        Selecting a machine may only affect the signature of tasks that are specific
        to that machine. In other words, when MACHINE=A and MACHINE=B share a recipe
        foo and the output of foo, then both machine configurations must build foo
        in exactly the same way. Otherwise it is not possible to use both machines
        in the same distribution.

        This criteria can only be tested by testing different machines in combination,
        i.e. one main layer, potentially several additional BSP layers and an explicit
        choice of machines:
        yocto-check-layer --additional-layers .../meta-intel --machines intel-corei7-64 imx6slevk -- .../meta-freescale
        '''

        if not self.td['machines']:
            self.skipTest('No machines set with --machines.')

        # Collect signatures for all machines that we are testing
        # and merge that into a hash:
        # tune -> task -> signature -> list of machines with that combination
        #
        # It is an error if any tune/task pair has more than one signature,
        # because that implies that the machines that caused those different
        # signatures do not agree on how to execute the task.
        tunes = {}
        # Preserve ordering of machines as chosen by the user.
        for machine in self.td['machines']:
            curr_sigs, tune2tasks = get_signatures(self.td['builddir'], failsafe=True, machine=machine)
            # Invert the tune -> [tasks] mapping.
            tasks2tune = {}
            for tune, tasks in tune2tasks.items():
                for task in tasks:
                    tasks2tune[task] = tune
            for task, sighash in curr_sigs.items():
                tunes.setdefault(tasks2tune[task], {}).setdefault(task, {}).setdefault(sighash, []).append(machine)

        msg = []
        pruned = 0
        last_line_key = None
        # do_fetch, do_unpack, ..., do_build
        taskname_list = []
        if tunes:
            # The output below is most useful when we start with tasks that are at
            # the bottom of the dependency chain, i.e. those that run first. If
            # those tasks differ, the rest also does.
            #
            # To get an ordering of tasks, we do a topological sort of the entire
            # depgraph for the base configuration, then on-the-fly flatten that list by stripping
            # out the recipe names and removing duplicates. The base configuration
            # is not necessarily representative, but should be close enough. Tasks
            # that were not encountered get a default priority.
            depgraph = get_depgraph()
            depends = depgraph['tdepends']
            # Standard three-color DFS: WHITE = unvisited, GRAY = on the
            # current DFS path, BLACK = fully processed.
            WHITE = 1
            GRAY = 2
            BLACK = 3
            color = {}
            found = set()

            def visit(task):
                # NOTE(review): recursive DFS; a very deep dependency chain
                # could in principle hit Python's recursion limit — confirm
                # against real-world depgraph depths before changing.
                color[task] = GRAY
                for dep in depends.get(task, ()):
                    if color.setdefault(dep, WHITE) == WHITE:
                        visit(dep)
                color[task] = BLACK
                # Record each task *name* (do_fetch, ...) once, in post-order,
                # i.e. dependencies first.
                pn, taskname = task.rsplit('.', 1)
                if taskname not in found:
                    taskname_list.append(taskname)
                    found.add(taskname)

            for task in depends.keys():
                if color.setdefault(task, WHITE) == WHITE:
                    visit(task)

            # Lower index = runs earlier = report first; unknown task names
            # sort last via the len(taskname_list) default.
            taskname_order = dict([(task, index) for index, task in enumerate(taskname_list)])

            def task_key(task):
                pn, taskname = task.rsplit(':', 1)
                return (pn, taskname_order.get(taskname, len(taskname_list)), taskname)

            for tune in sorted(tunes.keys()):
                tasks = tunes[tune]
                # As for test_signatures it would be nicer to sort tasks
                # by dependencies here, but that is harder because we have
                # to report on tasks from different machines, which might
                # have different dependencies. We resort to pruning the
                # output by reporting only one task per recipe if the set
                # of machines matches.
                #
                # "bitbake-diffsigs -t -s" is intelligent enough to print
                # diffs recursively, so often it does not matter that much
                # if we don't pick the underlying difference
                # here. However, sometimes recursion fails
                # (https://bugzilla.yoctoproject.org/show_bug.cgi?id=6428).
                #
                # To mitigate that a bit, we use a hard-coded ordering of
                # tasks that represents how they normally run and prefer
                # to print the ones that run first.
                for task in sorted(tasks.keys(), key=task_key):
                    signatures = tasks[task]
                    # do_build can be ignored: it is know to have
                    # different signatures in some cases, for example in
                    # the allarch ca-certificates due to RDEPENDS=openssl.
                    # That particular dependency is whitelisted via
                    # SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS, but still shows up
                    # in the sstate signature hash because filtering it
                    # out would be hard and running do_build multiple
                    # times doesn't really matter.
                    if len(signatures) > 1 and \
                       not task.endswith(':do_build'):
                        # Error!
                        #
                        # Sort signatures by machines, because the hex values don't mean anything.
                        # => all-arch adwaita-icon-theme:do_build: 1234... (beaglebone, qemux86) != abcdf... (qemux86-64)
                        #
                        # Skip the line if it is covered already by the predecessor (same pn, same sets of machines).
                        pn, taskname = task.rsplit(':', 1)
                        next_line_key = (pn, sorted(signatures.values()))
                        if next_line_key != last_line_key:
                            line = '   %s %s: ' % (tune, task)
                            line += ' != '.join(['%s (%s)' % (signature, ', '.join(signatures[signature])) for
                                                 signature in sorted(signatures.keys(), key=lambda s: signatures[s])])
                            last_line_key = next_line_key
                            msg.append(line)
                            # Randomly pick two mismatched signatures and remember how to invoke
                            # bitbake-diffsigs for them.
                            iterator = iter(signatures.items())
                            a = next(iterator)
                            b = next(iterator)
                            diffsig_machines = '(%s) != (%s)' % (', '.join(a[1]), ', '.join(b[1]))
                            diffsig_params = '-t %s %s -s %s %s' % (pn, taskname, a[0], b[0])
                        else:
                            pruned += 1

        if msg:
            msg.insert(0, 'The machines have conflicting signatures for some shared tasks:')
            if pruned > 0:
                msg.append('')
                # Fixed message typo: "where" -> "were".
                msg.append('%d tasks were not listed because some other task of the recipe already differed.' % pruned)
                msg.append('It is likely that differences from different recipes also have the same root cause.')
            msg.append('')
            # Explain how to investigate...
            msg.append('To investigate, run bitbake-diffsigs -t recipename taskname -s fromsig tosig.')
            cmd = 'bitbake-diffsigs %s' % diffsig_params
            msg.append('Example: %s in the last line' % diffsig_machines)
            msg.append('Command: %s' % cmd)
            # ... and actually do it automatically for that example, but without aborting
            # when that fails.
            try:
                output = check_command('Comparing signatures failed.', cmd).decode('utf-8')
            except RuntimeError as ex:
                output = str(ex)
            msg.extend(['   ' + line for line in output.splitlines()])
            self.fail('\n'.join(msg))