Patrick Williams | 92b42cb | 2022-09-03 06:53:57 -0500 | [diff] [blame] | 1 | # |
| 2 | # Copyright OpenEmbedded Contributors |
| 3 | # |
| 4 | # SPDX-License-Identifier: MIT |
| 5 | # |
| 6 | |
# These directories will be staged in the sysroot
SYSROOT_DIRS = " \
    ${includedir} \
    ${libdir} \
    ${base_libdir} \
    ${nonarch_base_libdir} \
    ${datadir} \
    /sysroot-only \
"

# These directories are also staged in the sysroot when they contain files that
# are usable on the build system
SYSROOT_DIRS_NATIVE = " \
    ${bindir} \
    ${sbindir} \
    ${base_bindir} \
    ${base_sbindir} \
    ${libexecdir} \
    ${sysconfdir} \
    ${localstatedir} \
"
# native/cross/crosssdk recipes produce tools that run on the build host, so
# their executable directories are useful in the sysroot as well
SYSROOT_DIRS:append:class-native = " ${SYSROOT_DIRS_NATIVE}"
SYSROOT_DIRS:append:class-cross = " ${SYSROOT_DIRS_NATIVE}"
SYSROOT_DIRS:append:class-crosssdk = " ${SYSROOT_DIRS_NATIVE}"

# These directories will not be staged in the sysroot
# (documentation, locale and desktop data are not needed to build against
# the staged output, so pruning them keeps sysroots small)
SYSROOT_DIRS_IGNORE = " \
    ${mandir} \
    ${docdir} \
    ${infodir} \
    ${datadir}/X11/locale \
    ${datadir}/applications \
    ${datadir}/bash-completion \
    ${datadir}/fonts \
    ${datadir}/gtk-doc/html \
    ${datadir}/installed-tests \
    ${datadir}/locale \
    ${datadir}/pixmaps \
    ${datadir}/terminfo \
    ${libdir}/${BPN}/ptest \
"
| 48 | |
# Stage the contents of directory $1 into directory $2, hardlinking where
# possible (cpio -l) to save space. A no-op if the source does not exist.
#
# Fixes over the previous version: $src and $rdest are now quoted so paths
# containing whitespace do not word-split, and the cd is guarded so a failed
# chdir cannot cause find|cpio to run against the wrong directory.
sysroot_stage_dir() {
	src="$1"
	dest="$2"
	# if the src doesn't exist don't do anything
	if [ ! -d "$src" ]; then
		return
	fi

	mkdir -p "$dest"
	# cpio -p is relative to the cwd, so compute dest relative to src
	rdest=$(realpath --relative-to="$src" "$dest")
	(
		cd "$src" || exit 1
		# -p pass-through copy, -d make dirs, -l hardlink, -u overwrite newer
		find . -print0 | cpio --null -pdlu "$rdest"
	)
}
| 64 | |
# Stage every directory listed in SYSROOT_DIRS from install tree $1 into
# sysroot staging area $2, then prune the SYSROOT_DIRS_IGNORE entries.
sysroot_stage_dirs() {
	from="$1"
	to="$2"

	# Copy each whitelisted directory across
	for sdir in ${SYSROOT_DIRS}; do
		sysroot_stage_dir "$from$sdir" "$to$sdir"
	done

	# Remove directories we do not care about
	for sdir in ${SYSROOT_DIRS_IGNORE}; do
		rm -rf "$to$sdir"
	done
}
| 78 | |
# Stage the whole install tree (${D}) into ${SYSROOT_DESTDIR}
sysroot_stage_all() {
	sysroot_stage_dirs ${D} ${SYSROOT_DESTDIR}
}
| 82 | |
python sysroot_strip () {
    # Strip executables/libraries staged into ${SYSROOT_DESTDIR}, unless the
    # recipe opts out via INHIBIT_SYSROOT_STRIP.
    inhibit_sysroot = d.getVar('INHIBIT_SYSROOT_STRIP')
    if inhibit_sysroot and oe.types.boolean(inhibit_sysroot):
        return

    dstdir = d.getVar('SYSROOT_DESTDIR')
    pn = d.getVar('PN')
    libdir = d.getVar("libdir")
    base_libdir = d.getVar("base_libdir")
    # Recipes that ship pre-stripped binaries skip the corresponding QA check;
    # pass that through so strip_execs does not warn about them again.
    qa_already_stripped = 'already-stripped' in (d.getVar('INSANE_SKIP:' + pn) or "").split()
    strip_cmd = d.getVar("STRIP")

    oe.package.strip_execs(pn, dstdir, strip_cmd, libdir, base_libdir, d,
                           qa_already_stripped=qa_already_stripped)
}
| 98 | |
# Ensure the staging destination exists before the task body runs
do_populate_sysroot[dirs] = "${SYSROOT_DESTDIR}"

addtask populate_sysroot after do_install

# Optional recipe-supplied hook functions, run at the end of do_populate_sysroot
SYSROOT_PREPROCESS_FUNCS ?= ""
SYSROOT_DESTDIR = "${WORKDIR}/sysroot-destdir"
| 105 | |
python do_populate_sysroot () {
    # SYSROOT 'version' 2
    # Stage and strip the install output, run any recipe-specific preprocess
    # hooks, then record which virtual targets this recipe provides.
    for fn in ["sysroot_stage_all", "sysroot_strip"]:
        bb.build.exec_func(fn, d)
    for fn in (d.getVar('SYSROOT_PREPROCESS_FUNCS') or '').split():
        bb.build.exec_func(fn, d)
    recipename = d.getVar("PN")
    allowed_multi = d.getVar("BB_MULTI_PROVIDER_ALLOWED").split()
    provdir = d.expand("${SYSROOT_DESTDIR}${base_prefix}/sysroot-providers/")
    bb.utils.mkdirhier(provdir)
    # Write one marker file per provided name (slashes mangled to underscores)
    # so conflicting providers can be detected later; names that are allowed
    # to have multiple providers are skipped.
    for prov in d.getVar("PROVIDES").split():
        if prov in allowed_multi:
            continue
        with open(provdir + prov.replace("/", "_"), "w") as f:
            f.write(recipename)
}
| 123 | |
# Rerun the task whenever any of the preprocess hook functions change
do_populate_sysroot[vardeps] += "${SYSROOT_PREPROCESS_FUNCS}"
do_populate_sysroot[vardepsexclude] += "BB_MULTI_PROVIDER_ALLOWED"

# Target builds need the matching binutils (strip) available first
POPULATESYSROOTDEPS = ""
POPULATESYSROOTDEPS:class-target = "virtual/${MLPREFIX}${HOST_PREFIX}binutils:do_populate_sysroot"
POPULATESYSROOTDEPS:class-nativesdk = "virtual/${HOST_PREFIX}binutils-crosssdk:do_populate_sysroot"
do_populate_sysroot[depends] += "${POPULATESYSROOTDEPS}"

# do_populate_sysroot output is cached and restored through shared state;
# its input is the staging area and its output lands in the component store.
SSTATETASKS += "do_populate_sysroot"
do_populate_sysroot[cleandirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}"
do_populate_sysroot[sstate-outputdirs] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
do_populate_sysroot[sstate-fixmedir] = "${COMPONENTS_DIR}/${PACKAGE_ARCH}/${PN}"
| 137 | |
python do_populate_sysroot_setscene () {
    # Restore do_populate_sysroot output from shared state instead of rebuilding
    sstate_setscene(d)
}
addtask do_populate_sysroot_setscene
| 142 | |
def staging_copyfile(c, target, dest, postinsts, seendirs):
    """Install one manifest entry *c* at *dest*.

    Symlinks are recreated; regular files are hardlinked where possible,
    falling back to a copy across filesystems. Postinstall scripts are
    collected into *postinsts* for later execution, and directories already
    created are tracked in *seendirs* to avoid repeated mkdir calls.
    Returns *dest*.
    """
    import errno

    parent = os.path.dirname(dest)
    if parent not in seendirs:
        bb.utils.mkdirhier(parent)
        seendirs.add(parent)
    if "/usr/bin/postinst-" in c:
        postinsts.append(dest)

    if not os.path.islink(c):
        # Regular file: hardlink when source and dest share a filesystem.
        try:
            os.link(c, dest)
        except OSError as err:
            if err.errno != errno.EXDEV:
                raise
            bb.utils.copyfile(c, dest)
        return dest

    # Symlink: reproduce it, tolerating an identical pre-existing link.
    linkto = os.readlink(c)
    if os.path.lexists(dest):
        if not os.path.islink(dest):
            raise OSError(errno.EEXIST, "Link %s already exists as a file" % dest, dest)
        if os.readlink(dest) == linkto:
            return dest
        raise OSError(errno.EEXIST, "Link %s already exists to a different location? (%s vs %s)" % (dest, os.readlink(dest), linkto), dest)
    os.symlink(linkto, dest)
    #bb.warn(c)
    return dest
| 171 | |
def staging_copydir(c, target, dest, seendirs):
    """Ensure directory *dest* exists, creating it at most once per run
    (creations are remembered in *seendirs*)."""
    if dest in seendirs:
        return
    bb.utils.mkdirhier(dest)
    seendirs.add(dest)
| 176 | |
def staging_processfixme(fixme, target, recipesysroot, recipesysrootnative, d):
    """Rewrite placeholder paths in the staged 'fixmepath' files listed in
    *fixme*, pointing them at this recipe's sysroots. No-op for an empty list.
    """
    import subprocess

    if not fixme:
        return
    # The first sed rewrites the relative manifest paths under 'target';
    # the piped sed then substitutes the staging placeholders in-place.
    cmd = "sed -e 's:^[^/]*/:%s/:g' %s | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (target, " ".join(fixme), recipesysroot, recipesysrootnative)
    cmd += "".join(" -e 's:FIXME_%s:%s:g'" % (fixmevar, d.getVar(fixmevar))
                   for fixmevar in ('PSEUDO_SYSROOT', 'HOSTTOOLS_DIR', 'PKGDATA_DIR', 'PSEUDO_LOCALSTATEDIR', 'LOGFIFO'))
    bb.debug(2, cmd)
    subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
| 188 | |
| 189 | |
def staging_populate_sysroot_dir(targetsysroot, nativesysroot, native, d):
    """Populate one recipe sysroot directly from the sstate manifests.

    Used on the 'bitbake -b' (no dependency data) path. Walks the installed
    manifests for the relevant package architectures, copies/links each
    listed file into the target (or native) sysroot, then processes fixme
    path rewrites and runs any collected postinstall scripts.
    """
    import glob
    import subprocess
    import errno

    fixme = []
    postinsts = []
    seendirs = set()
    stagingdir = d.getVar("STAGING_DIR")
    if native:
        # Literal ${...} strings here: they are expanded by d.expand() below
        pkgarchs = ['${BUILD_ARCH}', '${BUILD_ARCH}_*']
        targetdir = nativesysroot
    else:
        pkgarchs = ['${MACHINE_ARCH}']
        # Most-specific arch first, 'allarch' last
        pkgarchs = pkgarchs + list(reversed(d.getVar("PACKAGE_EXTRA_ARCHS").split()))
        pkgarchs.append('allarch')
        targetdir = targetsysroot

    bb.utils.mkdirhier(targetdir)
    for pkgarch in pkgarchs:
        for manifest in glob.glob(d.expand("${SSTATE_MANIFESTS}/manifest-%s-*.populate_sysroot" % pkgarch)):
            if manifest.endswith("-initial.populate_sysroot"):
                # skip libgcc-initial due to file overlap
                continue
            # Filter manifests to the sysroot flavour being populated
            if not native and (manifest.endswith("-native.populate_sysroot") or "nativesdk-" in manifest):
                continue
            if native and not (manifest.endswith("-native.populate_sysroot") or manifest.endswith("-cross.populate_sysroot") or "-cross-" in manifest):
                continue
            tmanifest = targetdir + "/" + os.path.basename(manifest)
            if os.path.exists(tmanifest):
                # Already installed into this sysroot; skip
                continue
            # Record the manifest in the sysroot (hardlink, copy across devices)
            try:
                os.link(manifest, tmanifest)
            except OSError as err:
                if err.errno == errno.EXDEV:
                    bb.utils.copyfile(manifest, tmanifest)
                else:
                    raise
            with open(manifest, "r") as f:
                for l in f:
                    l = l.strip()
                    if l.endswith("/fixmepath"):
                        # Defer path-rewriting files to staging_processfixme()
                        fixme.append(l)
                        continue
                    if l.endswith("/fixmepath.cmd"):
                        continue
                    # Strip the staging prefix plus the first three path
                    # components to get the sysroot-relative destination
                    dest = l.replace(stagingdir, "")
                    dest = targetdir + "/" + "/".join(dest.split("/")[3:])
                    if l.endswith("/"):
                        staging_copydir(l, targetdir, dest, seendirs)
                        continue
                    try:
                        staging_copyfile(l, targetdir, dest, postinsts, seendirs)
                    except FileExistsError:
                        continue

    staging_processfixme(fixme, targetdir, targetsysroot, nativesysroot, d)
    for p in postinsts:
        subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)
| 249 | |
| 250 | # |
| 251 | # Manifests here are complicated. The main sysroot area has the unpacked sstate |
# which is unrelocated and tracked by the main sstate manifests. Each recipe
| 253 | # specific sysroot has manifests for each dependency that is installed there. |
| 254 | # The task hash is used to tell whether the data needs to be reinstalled. We |
| 255 | # use a symlink to point to the currently installed hash. There is also a |
| 256 | # "complete" stamp file which is used to mark if installation completed. If |
| 257 | # something fails (e.g. a postinst), this won't get written and we would |
| 258 | # remove and reinstall the dependency. This also means partially installed |
| 259 | # dependencies should get cleaned up correctly. |
| 260 | # |
| 261 | |
python extend_recipe_sysroot() {
    # Install the do_populate_sysroot output of this recipe's dependencies
    # into the recipe-specific sysroots (target and native). Which
    # dependencies are visible is decided by setscene_depvalid() (provided
    # elsewhere in the sstate code — not defined in this file). Installed
    # state is tracked under <recipe-sysroot-native>/installeddeps via
    # per-dependency manifest symlinks and ".complete" stamp files; see the
    # comment block above this function.
    import copy
    import subprocess
    import errno
    import collections
    import glob

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    if mytaskname.endswith("_setscene"):
        mytaskname = mytaskname.replace("_setscene", "")
    workdir = d.getVar("WORKDIR")
    #bb.warn(str(taskdepdata))
    pn = d.getVar("PN")
    stagingdir = d.getVar("STAGING_DIR")
    sharedmanifests = d.getVar("COMPONENTS_DIR") + "/manifests"
    recipesysroot = d.getVar("RECIPE_SYSROOT")
    recipesysrootnative = d.getVar("RECIPE_SYSROOT_NATIVE")

    # Detect bitbake -b usage
    # (no task dependency data available, so populate straight from sstate
    # manifests instead of walking BB_TASKDEPDATA)
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps:
        lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")
        staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, True, d)
        staging_populate_sysroot_dir(recipesysroot, recipesysrootnative, False, d)
        bb.utils.unlockfile(lock)
        return

    start = None
    configuredeps = []
    owntaskdeps = []
    # Locate our own task in the dependency data and collect this recipe's
    # other task names (used for the TaskDeps index header later).
    # Entry layout used below: data[0]=PN, data[1]=taskname, data[2]=fn,
    # data[3]=dependency set, data[5]=taskhash — per BB_TASKDEPDATA.
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
        elif data[0] == pn:
            owntaskdeps.append(data[1])
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")

    # We need to figure out which sysroot files we need to expose to this task.
    # This needs to match what would get restored from sstate, which is controlled
    # ultimately by calls from bitbake to setscene_depvalid().
    # That function expects a setscene dependency tree. We build a dependency tree
    # condensed to inter-sstate task dependencies, similar to that used by setscene
    # tasks. We can then call into setscene_depvalid() and decide
    # which dependencies we can "see" and should expose in the recipe specific sysroot.
    setscenedeps = copy.deepcopy(taskdepdata)

    start = set([start])

    sstatetasks = d.getVar("SSTATETASKS").split()
    # Add recipe specific tasks referenced by setscene_depvalid()
    sstatetasks.append("do_stash_locale")
    sstatetasks.append("do_deploy")

    # Debug helper: render a dependency tree as readable text
    def print_dep_tree(deptree):
        data = ""
        for dep in deptree:
            deps = "    " + "\n    ".join(deptree[dep][3]) + "\n"
            data = data + "%s:\n  %s\n  %s\n%s  %s\n  %s\n" % (deptree[dep][0], deptree[dep][1], deptree[dep][2], deps, deptree[dep][4], deptree[dep][5])
        return data

    #bb.note("Full dep tree is:\n%s" % print_dep_tree(taskdepdata))

    #bb.note(" start2 is %s" % str(start))

    # If start is an sstate task (like do_package) we need to add in its direct dependencies
    # else the code below won't recurse into them.
    for dep in set(start):
        for dep2 in setscenedeps[dep][3]:
            start.add(dep2)
        start.remove(dep)

    #bb.note(" start3 is %s" % str(start))

    # Create collapsed do_populate_sysroot -> do_populate_sysroot tree
    # (non-sstate tasks are removed and their dependencies spliced into
    # whichever tasks depended on them)
    for dep in taskdepdata:
        data = setscenedeps[dep]
        if data[1] not in sstatetasks:
            for dep2 in setscenedeps:
                data2 = setscenedeps[dep2]
                if dep in data2[3]:
                    data2[3].update(setscenedeps[dep][3])
                    data2[3].remove(dep)
            if dep in start:
                start.update(setscenedeps[dep][3])
                start.remove(dep)
            del setscenedeps[dep]

    # Remove circular references
    for dep in setscenedeps:
        if dep in setscenedeps[dep][3]:
            setscenedeps[dep][3].remove(dep)

    #bb.note("Computed dep tree is:\n%s" % print_dep_tree(setscenedeps))
    #bb.note(" start is %s" % str(start))

    # Direct dependencies should be present and can be depended upon
    for dep in sorted(set(start)):
        if setscenedeps[dep][1] == "do_populate_sysroot":
            if dep not in configuredeps:
                configuredeps.append(dep)
    bb.note("Direct dependencies are %s" % str(configuredeps))
    #bb.note(" or %s" % str(start))

    msgbuf = []
    # Call into setscene_depvalid for each sub-dependency and only copy sysroot files
    # for ones that would be restored from sstate.
    done = list(start)
    next = list(start)
    # Breadth-first walk of the collapsed tree
    while next:
        new = []
        for dep in next:
            data = setscenedeps[dep]
            for datadep in data[3]:
                if datadep in done:
                    continue
                taskdeps = {}
                taskdeps[dep] = setscenedeps[dep][:2]
                taskdeps[datadep] = setscenedeps[datadep][:2]
                retval = setscene_depvalid(datadep, taskdeps, [], d, msgbuf)
                if retval:
                    # A true return means sstate would NOT restore this, so
                    # its sysroot output must not be exposed either
                    msgbuf.append("Skipping setscene dependency %s for installation into the sysroot" % datadep)
                    continue
                done.append(datadep)
                new.append(datadep)
                if datadep not in configuredeps and setscenedeps[datadep][1] == "do_populate_sysroot":
                    configuredeps.append(datadep)
                    msgbuf.append("Adding dependency on %s" % setscenedeps[datadep][0])
                else:
                    msgbuf.append("Following dependency on %s" % setscenedeps[datadep][0])
        next = new

    # This logging is too verbose for day to day use sadly
    #bb.debug(2, "\n".join(msgbuf))

    depdir = recipesysrootnative + "/installeddeps"
    bb.utils.mkdirhier(depdir)
    bb.utils.mkdirhier(sharedmanifests)

    # Serialise all sysroot manipulation for this recipe
    lock = bb.utils.lockfile(recipesysroot + "/sysroot.lock")

    fixme = {}
    seendirs = set()
    postinsts = []
    multilibs = {}
    manifests = {}
    # All files that we're going to be installing, to find conflicts.
    fileset = {}

    # First pass: clean out installed dependencies whose manifest symlink has
    # gone dangling (e.g. the component was removed from the sstate cache).
    invalidate_tasks = set()
    for f in os.listdir(depdir):
        removed = []
        if not f.endswith(".complete"):
            continue
        f = depdir + "/" + f
        if os.path.islink(f) and not os.path.exists(f):
            bb.note("%s no longer exists, removing from sysroot" % f)
            lnk = os.readlink(f.replace(".complete", ""))
            sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
            os.unlink(f)
            os.unlink(f.replace(".complete", ""))
            removed.append(os.path.basename(f.replace(".complete", "")))

        # If we've removed files from the sysroot above, the task that installed them may still
        # have a stamp file present for the task. This is probably invalid right now but may become
        # valid again if the user were to change configuration back for example. Since we've removed
        # the files a task might need, remove the stamp file too to force it to rerun.
        # YOCTO #14790
        if removed:
            for i in glob.glob(depdir + "/index.*"):
                if i.endswith("." + mytaskname):
                    continue
                with open(i, "r") as f:
                    for l in f:
                        if l.startswith("TaskDeps:"):
                            continue
                        l = l.strip()
                        if l in removed:
                            invalidate_tasks.add(i.rsplit(".", 1)[1])
                            break
    for t in invalidate_tasks:
        bb.note("Invalidating stamps for task %s" % t)
        bb.build.clean_stamp(t, d)

    # Resolve the dependency keys into recipe (component) names
    installed = []
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if mytaskname in ["do_sdk_depends", "do_populate_sdk_ext"] and c.endswith("-initial"):
            bb.note("Skipping initial setscene dependency %s for installation into the sysroot" % c)
            continue
        installed.append(c)

    # We want to remove anything which this task previously installed but is no longer a dependency
    taskindex = depdir + "/" + "index." + mytaskname
    if os.path.exists(taskindex):
        potential = []
        with open(taskindex, "r") as f:
            for l in f:
                l = l.strip()
                if l not in installed:
                    fl = depdir + "/" + l
                    if not os.path.exists(fl):
                        # Was likely already uninstalled
                        continue
                    potential.append(l)
        # We need to ensure no other task needs this dependency. We hold the sysroot
        # lock so we can search the indexes to check
        if potential:
            for i in glob.glob(depdir + "/index.*"):
                if i.endswith("." + mytaskname):
                    continue
                with open(i, "r") as f:
                    for l in f:
                        if l.startswith("TaskDeps:"):
                            prevtasks = l.split()[1:]
                            if mytaskname in prevtasks:
                                # We're a dependency of this task so we can clear items out of the sysroot
                                break
                        l = l.strip()
                        if l in potential:
                            potential.remove(l)
        for l in potential:
            fl = depdir + "/" + l
            bb.note("Task %s no longer depends on %s, removing from sysroot" % (mytaskname, l))
            lnk = os.readlink(fl)
            sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
            os.unlink(fl)
            os.unlink(fl + ".complete")

    msg_exists = []
    msg_adding = []

    # Handle all removals first since files may move between recipes
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if c not in installed:
            continue
        taskhash = setscenedeps[dep][5]
        taskmanifest = depdir + "/" + c + "." + taskhash

        if os.path.exists(depdir + "/" + c):
            lnk = os.readlink(depdir + "/" + c)
            if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
                # Up to date and fully installed; nothing to remove
                continue
            else:
                bb.note("%s exists in sysroot, but is stale (%s vs. %s), removing." % (c, lnk, c + "." + taskhash))
                sstate_clean_manifest(depdir + "/" + lnk, d, canrace=True, prefix=workdir)
                os.unlink(depdir + "/" + c)
                if os.path.lexists(depdir + "/" + c + ".complete"):
                    os.unlink(depdir + "/" + c + ".complete")
        elif os.path.lexists(depdir + "/" + c):
            # Dangling symlink left over from an interrupted install
            os.unlink(depdir + "/" + c)

    binfiles = {}
    # Now handle installs
    for dep in configuredeps:
        c = setscenedeps[dep][0]
        if c not in installed:
            continue
        taskhash = setscenedeps[dep][5]
        taskmanifest = depdir + "/" + c + "." + taskhash

        if os.path.exists(depdir + "/" + c):
            lnk = os.readlink(depdir + "/" + c)
            if lnk == c + "." + taskhash and os.path.exists(depdir + "/" + c + ".complete"):
                msg_exists.append(c)
                continue

        msg_adding.append(c)

        # Mark the installation as in progress (the ".complete" stamp is only
        # written at the very end, after postinsts have run)
        os.symlink(c + "." + taskhash, depdir + "/" + c)

        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, setscenedeps[dep][2], "populate_sysroot", d, multilibs)
        if d2 is not d:
            # If we don't do this, the recipe sysroot will be placed in the wrong WORKDIR for multilibs
            # We need a consistent WORKDIR for the image
            d2.setVar("WORKDIR", d.getVar("WORKDIR"))
        destsysroot = d2.getVar("RECIPE_SYSROOT")
        # We put allarch recipes into the default sysroot
        if manifest and "allarch" in manifest:
            destsysroot = d.getVar("RECIPE_SYSROOT")

        native = False
        if c.endswith("-native") or "-cross-" in c or "-crosssdk" in c:
            native = True

        if manifest:
            newmanifest = collections.OrderedDict()
            targetdir = destsysroot
            if native:
                targetdir = recipesysrootnative
            if targetdir not in fixme:
                fixme[targetdir] = []
            fm = fixme[targetdir]

            with open(manifest, "r") as f:
                manifests[dep] = manifest
                for l in f:
                    l = l.strip()
                    if l.endswith("/fixmepath"):
                        # Defer path rewriting to staging_processfixme() below
                        fm.append(l)
                        continue
                    if l.endswith("/fixmepath.cmd"):
                        continue
                    # Strip staging prefix plus first three path components to
                    # get the sysroot-relative destination
                    dest = l.replace(stagingdir, "")
                    dest = "/" + "/".join(dest.split("/")[3:])
                    newmanifest[l] = targetdir + dest

                    # Check if files have already been installed by another
                    # recipe and abort if they have, explaining what recipes are
                    # conflicting.
                    hashname = targetdir + dest
                    if not hashname.endswith("/"):
                        if hashname in fileset:
                            bb.fatal("The file %s is installed by both %s and %s, aborting" % (dest, c, fileset[hashname]))
                        else:
                            fileset[hashname] = c

            # Having multiple identical manifests in each sysroot eats diskspace so
            # create a shared pool of them and hardlink if we can.
            # We create the manifest in advance so that if something fails during installation,
            # or the build is interrupted, subsequent execution can cleanup.
            sharedm = sharedmanifests + "/" + os.path.basename(taskmanifest)
            if not os.path.exists(sharedm):
                smlock = bb.utils.lockfile(sharedm + ".lock")
                # Can race here. You'd think it just means we may not end up with all copies hardlinked to each other
                # but python can lose file handles so we need to do this under a lock.
                if not os.path.exists(sharedm):
                    with open(sharedm, 'w') as m:
                        for l in newmanifest:
                            dest = newmanifest[l]
                            m.write(dest.replace(workdir + "/", "") + "\n")
                bb.utils.unlockfile(smlock)
            try:
                os.link(sharedm, taskmanifest)
            except OSError as err:
                if err.errno == errno.EXDEV:
                    bb.utils.copyfile(sharedm, taskmanifest)
                else:
                    raise
            # Finally actually install the files
            for l in newmanifest:
                dest = newmanifest[l]
                if l.endswith("/"):
                    staging_copydir(l, targetdir, dest, seendirs)
                    continue
                if "/bin/" in l or "/sbin/" in l:
                    # defer /*bin/* files until last in case they need libs
                    binfiles[l] = (targetdir, dest)
                else:
                    staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    # Handle deferred binfiles
    for l in binfiles:
        (targetdir, dest) = binfiles[l]
        staging_copyfile(l, targetdir, dest, postinsts, seendirs)

    bb.note("Installed into sysroot: %s" % str(msg_adding))
    bb.note("Skipping as already exists in sysroot: %s" % str(msg_exists))

    for f in fixme:
        staging_processfixme(fixme[f], f, recipesysroot, recipesysrootnative, d)

    for p in postinsts:
        subprocess.check_output(p, shell=True, stderr=subprocess.STDOUT)

    # Everything succeeded: write the ".complete" stamps...
    for dep in manifests:
        c = setscenedeps[dep][0]
        os.symlink(manifests[dep], depdir + "/" + c + ".complete")

    # ...and this task's index of what it installed, headed by the recipe's
    # own task list so other tasks can reason about shared dependencies
    with open(taskindex, "w") as f:
        f.write("TaskDeps: " + " ".join(owntaskdeps) + "\n")
        for l in sorted(installed):
            f.write(l + "\n")

    bb.utils.unlockfile(lock)
}
extend_recipe_sysroot[vardepsexclude] += "MACHINE_ARCH PACKAGE_EXTRA_ARCHS SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
| 642 | |
# All direct dependencies must have populated their sysroot output first
do_prepare_recipe_sysroot[deptask] = "do_populate_sysroot"
python do_prepare_recipe_sysroot () {
    bb.build.exec_func("extend_recipe_sysroot", d)
}
addtask do_prepare_recipe_sysroot before do_configure after do_fetch
| 648 | |
python staging_taskhandler() {
    # Event handler (RecipeTaskPreProcess): prepend extend_recipe_sysroot as
    # a prefunc of do_configure and of any task declaring a populate_sysroot
    # dependency, so the recipe sysroot is up to date before those tasks run.
    # 'e' is the event object supplied by BitBake to handlers.
    bbtasks = e.tasklist
    for task in bbtasks:
        deps = d.getVarFlag(task, "depends")
        if task == "do_configure" or (deps and "populate_sysroot" in deps):
            d.prependVarFlag(task, "prefuncs", "extend_recipe_sysroot ")
}
staging_taskhandler[eventmask] = "bb.event.RecipeTaskPreProcess"
addhandler staging_taskhandler
| 658 | |
| 659 | |
| 660 | # |
| 661 | # Target build output, stored in do_populate_sysroot or do_package can depend |
| 662 | # not only upon direct dependencies but also indirect ones. A good example is |
| 663 | # linux-libc-headers. The toolchain depends on this but most target recipes do |
| 664 | # not. There are some headers which are not used by the toolchain build and do |
| 665 | # not change the toolchain task output, hence the task hashes can change without |
| 666 | # changing the sysroot output of that recipe yet they can influence others. |
| 667 | # |
| 668 | # A specific example is rtc.h which can change rtcwake.c in util-linux but is not |
| 669 | # used in the glibc or gcc build. To account for this, we need to account for the |
| 670 | # populate_sysroot hashes in the task output hashes. |
| 671 | # |
python target_add_sysroot_deps () {
    # Fold the hashes of dependencies' do_populate_sysroot tasks into this
    # task's signature data (HASHEQUIV_EXTRA_SIGDATA), so that indirect
    # sysroot changes (see the comment block above) invalidate hash
    # equivalence correctly. Only applies to target do_populate_sysroot /
    # do_package; native recipes and native/initial/cross deps are excluded.
    current_task = "do_" + d.getVar("BB_CURRENTTASK")
    if current_task not in ["do_populate_sysroot", "do_package"]:
        return

    pn = d.getVar("PN")
    if pn.endswith("-native"):
        return

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    deps = {}
    for dep in taskdepdata.values():
        # dep[0]=PN, dep[1]=taskname; dep[6] is presumably the task's
        # (uni)hash field of BB_TASKDEPDATA — confirm against bitbake runqueue
        if dep[1] == "do_populate_sysroot" and not dep[0].endswith(("-native", "-initial")) and "-cross-" not in dep[0] and dep[0] != pn:
            deps[dep[0]] = dep[6]

    # Sorted for a deterministic signature string
    d.setVar("HASHEQUIV_EXTRA_SIGDATA", "\n".join("%s: %s" % (k, deps[k]) for k in sorted(deps.keys())))
}
SSTATECREATEFUNCS += "target_add_sysroot_deps"