blob: 2c8e7b8cc2321494f1435b82473c686eeff87d05 [file] [log] [blame]
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
6
# Bump whenever the sstate archive layout/format changes incompatibly so
# that stale sstate objects from older layouts are never reused.
SSTATE_VERSION = "10"

# zstd compression level used when packing sstate archives.
SSTATE_ZSTD_CLEVEL ??= "8"

# Directory holding per-task manifests of files installed into shared areas.
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    """Construct the relative path of an sstate archive.

    spec: colon-separated SSTATE_PKGSPEC/SSTATE_SWSPEC prefix.
    hash: the task (uni)hash; falsy values are replaced with "INVALID".
    taskname: task the archive belongs to; None yields "".
    siginfo: True when naming the ".siginfo" companion file.
    d: datastore (unused; kept for call-site compatibility).

    Returns "xx/yy/<name>" where xx/yy are the first two hash character
    pairs, sharding the sstate directory. Overlong names are shortened by
    truncating the informational spec fields (2,3,4); if the name still
    exceeds the 255-character filesystem limit, the build is aborted.
    """
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            # Fixed typo in the original message ("chararacters").
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn
40
# Architecture component of the sstate path; remapped per recipe class
# (native/cross/nativesdk/...) by the anonymous python block below.
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
# Colon-separated naming spec consumed by generate_sstatefn().
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
# Software-only spec with empty machine/arch fields (used for populate_lic).
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
# Glob matching all cached archives for a given task (used when cleaning).
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"

# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
# Paths where different recipes may legitimately install identical files;
# the overlap check in sstate_install() is suppressed for these prefixes.
# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps that allarch is disabled when multilib is used
SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
74
# File name patterns scanned for hardcoded build paths when packaging sstate.
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
# task:file-glob:replacement entries used by hash equivalence so path
# differences in the listed outputs do not change the output hash.
SSTATE_HASHEQUIV_FILEMAP ?= " \
    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
    populate_sysroot:*/crossscripts/*:${TMPDIR} \
    populate_sysroot:*/crossscripts/*:${COREBASE} \
    "

BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

# All architecture components an sstate mirror may carry objects for.
SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${PACKAGE_ARCH} \
    allarch \
    ${PACKAGE_ARCH} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

# Machine component used in manifest file names.
SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
101
# Hooks run (in SSTATE_BUILDDIR) when creating an sstate archive.
SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
# Hooks run (in SSTATE_INSTDIR) before/after unpacking and installing.
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
# Extra variables whose paths are rewritten via the fixmepath mechanism.
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnUPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
# List of signatures to consider valid.
SSTATE_VALID_SIGS ??= ""
SSTATE_VALID_SIGS[vardepvalue] = ""

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "
139
python () {
    # Choose the sstate architecture component for this recipe's class so
    # native/cross/SDK objects land in distinct, shareable path components.
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        # Plain target recipes: manifests are named per PACKAGE_ARCH.
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    # Host-dependent classes insert NATIVELSBSTRING into the sstate path so
    # objects built on different host distros stay separate.
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    # Wire the sstate pre/post functions into every sstate-capable task and
    # mark the tasks (and their setscene variants) as network-enabled.
    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}
169
def sstate_init(task, d):
    """Return a fresh shared-state tracking dict for *task*.

    'dirs' holds [srcbase, source, dest] staging triples; the lockfile
    lists are filled in later by sstate_state_fromvars().
    """
    return {
        'task': task,
        'dirs': [],
        'plaindirs': [],
        'lockfiles': [],
        'lockfiles-shared': [],
    }
178
def sstate_state_fromvars(d, task = None):
    """Build the sstate state dict for *task* from the task's varflags.

    Reads the sstate-inputdirs/outputdirs/plaindirs/lockfile(-shared)/
    interceptfuncs/fixmedir flags of do_<task> and returns the dict from
    sstate_init() populated with them. For populate_lic the spec is
    switched to the software-only SSTATE_SWSPEC (no machine path parts).
    Fatals when run without task context or with mismatched in/out dirs.
    """
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
    # Normalize: setscene variants and the do_ prefix map to the base task.
    task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    # inputs/outputs are parallel lists; a length mismatch means broken metadata.
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    ss['fixmedir'] = fixmedir
    return ss
212
def sstate_add(ss, source, dest, d):
    """Append a [basename, source, dest] staging triple to ss['dirs'].

    Both paths are normalised; the trailing-slash append preserves the
    historical behaviour for inputs supplied without one.
    """
    if not source.endswith("/"):
        source += "/"
    if not dest.endswith("/"):
        dest += "/"
    src = os.path.normpath(source)
    dst = os.path.normpath(dest)
    ss['dirs'].append([os.path.basename(src), src, dst])
    return ss
223
def sstate_install(ss, d):
    """Install staged task output into the shared areas and record a manifest.

    Walks each staged directory, checks the prospective file list for
    conflicts with files already present in the shared area (aborting with
    diagnostics on overlap unless the path is in SSTATE_ALLOW_OVERLAP_FILES),
    writes the manifest of installed files/dirs, registers it in the
    per-arch index, hardlink-copies the files into place and finally runs
    SSTATEPOSTINSTFUNCS. All of this happens under the task's lockfiles.
    """
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    # An existing readable manifest means this task's output was already
    # installed and never cleaned — refuse to double-stage.
    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    # Shared locks first, then exclusive ones.
    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    # First pass: collect the destination paths that would be created.
    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    # Symlinked dirs are tracked as files so the link itself
                    # gets installed/removed, not its target's contents.
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f) and not os.path.islink(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                # Search the existing manifests to report which task owns
                # the conflicting file.
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append(" (not matched to any task)")
    if match:
        bb.error("The recipe %s is trying to install files into a shared " \
          "area when those files already exist. Those files and their manifest " \
          "location are:\n %s\nPlease verify which recipe should provide the " \
          "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
          "break things - if not now, possibly in the future (we've seen builds fail " \
          "several months later). If the system knew how to recover from this " \
          "automatically it would, however there are several different scenarios " \
          "which can result in this and we don't know which one this is. It may be " \
          "you have switched providers of something like virtual/kernel (e.g. from " \
          "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
          "clean task for both recipes and it will resolve this error. It may be " \
          "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
          "those recipes should again resolve this error, however switching " \
          "DISTRO_FEATURES on an existing build directory is not supported - you " \
          "should really clean out tmp and rebuild (reusing sstate should be safe). " \
          "It could be the overlapping files detected are harmless in which case " \
          "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
          "also be your build is including two different conflicting versions of " \
          "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
          "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
          "sharing the error and filelist above." % \
          (d.getVar('PN'), "\n ".join(match)))
        bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries, we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)
357
# Fixed typo: the variable expanded via SSTATE_MANFILEPREFIX is
# SSTATE_MANMACH; the previous "STATE_MANMACH" spelling excluded nothing.
sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
360
def sstate_installpkg(ss, d):
    """Fetch (if needed), optionally verify, and unpack the sstate archive
    for ss['task'].

    Returns False when no usable archive is available or its signature
    cannot be verified (the caller then runs the real task); otherwise
    hands off to sstate_installpkgdir() and returns its result.
    """
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')

    # Try the configured sstate mirrors when the archive is not local yet.
    if not os.path.exists(sstatepkg):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    # Remove any previously installed output for this task first.
    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty sstateinst directory, ensure its clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)
402
def sstate_installpkgdir(ss, d):
    """Move an unpacked sstate archive from SSTATE_INSTDIR into place.

    Runs SSTATEPOSTUNPACKFUNCS, renames each staged subtree to its real
    destination, then finishes with sstate_install(). Plain directories
    (WORKDIR/work-shared trees) are moved back verbatim. Returns True.
    """
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        src = sstateinst + "/" + plain.replace(workdir, '')
        # work-shared trees live outside WORKDIR; strip that prefix instead.
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    return True
438
python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    # Only archives that contained hardcoded paths ship a fixmepath file.
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        # Pick the placeholder substitutions relevant to this recipe class.
        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}
487
def sstate_clean_cachefile(ss, d):
    """Delete the cached sstate archives matching SSTATE_PATHSPEC for
    ss['task'] (no-op if the task does not exist in this recipe)."""
    import oe.path

    if not d.getVarFlag('do_%s' % ss['task'], 'task'):
        return
    d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
    sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
    bb.note("Removing %s" % sstatepkgfile)
    oe.path.remove(sstatepkgfile)
496
def sstate_clean_cachefiles(d):
    """Remove cached sstate archives for every task in SSTATETASKS."""
    for task in (d.getVar('SSTATETASKS') or "").split():
        localdata = d.createCopy()
        sstate_clean_cachefile(sstate_state_fromvars(localdata, task), localdata)
502
def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    """Remove every file/directory listed in an sstate manifest, run and
    delete any associated ".postrm" script, then delete the manifest.

    canrace: when True, skip empty-directory removal because another task
    may be populating the directory concurrently.
    prefix: optional path prepended to relative manifest entries.
    """
    import oe.path

    # Context manager ensures the manifest is closed even if reading fails
    # (the original used a bare open()/close() pair).
    with open(manifest) as mfile:
        entries = mfile.readlines()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                # Directory entries carry a trailing slash in the manifest.
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(postrm):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)
538
def sstate_clean(ss, d):
    """Remove a task's installed sstate output and its stamps.

    Cleans via the task manifest (under the task's lockfiles) and then
    deletes matching stamp files, preserving sigdata/sigbasedata and
    taint files so signatures and forced-rerun markers survive.
    """
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        # Machine-specific stamps: narrow the glob and the manifest name.
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)
585
sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"

# Hook full sstate removal into bitbake's clean machinery.
CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    # Clean the installed sstate output of every sstate-capable task of
    # this recipe, via each task's manifest.
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}
603
python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    # Build a sed command that replaces concrete sysroot paths with FIXME
    # placeholders, plus a grep that preselects only files containing them.
    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        # macOS (BSD) xargs does not support --no-run-if-empty.
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
659
def sstate_package(ss, d):
    """Assemble the task output under WORKDIR/sstate-build-<task>/ and
    create the sstate archive (plus .siginfo, and a signature when
    SSTATE_SIG_KEY is set).

    Staged trees are *moved* into the build area; sstate_installpkgdir()
    subsequently moves them back into place.
    """
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    # do_package output has its timestamps clamped to SOURCE_DATE_EPOCH
    # below (reproducibility of the packaged data).
    fixtime = False
    if ss['task'] == "package":
        fixtime = True

    def fixtimestamp(root, path):
        # Clamp mtimes newer than the epoch; never follow symlinks.
        f = os.path.join(root, path)
        if os.lstat(f).st_mtime > sde:
            os.utime(f, (sde, sde), follow_symlinks=False)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but its not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                if fixtime:
                    fixtimestamp(walkroot, file)
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        # work-shared trees live outside WORKDIR; map that prefix instead.
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)
        if fixtime:
            fixtimestamp(pdir, "")
            for walkroot, dirs, files in os.walk(pdir):
                for file in files + dirs:
                    fixtimestamp(walkroot, file)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
    if d.getVar('SSTATE_SIG_KEY'):
        sstate_create_package.append('sstate_sign_package')

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        # Refresh the mtime so mirror cleanup sees the file as in use.
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return
750
sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"

def pstaging_fetch(sstatefetch, d):
    """Try to fetch an sstate archive (plus its .siginfo and, when
    signature verification is enabled, its .sig) from SSTATE_MIRRORS
    into SSTATE_DIR. Fetch failures are silently ignored; the caller
    falls back to building the task."""
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)
    localdata.setVar('SRCPV', d.getVar('SRCPV'))

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        # Clear the previous SRC_URI entirely before setting the next
        # candidate (delVar also drops any flags set on the variable).
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass

pstaging_fetch[vardepsexclude] += "SRCPV"
798
799
def sstate_setscene(d):
    """Setscene entry point: install the task's sstate archive, or raise
    a handled exception so the real task runs instead."""
    shared_state = sstate_state_fromvars(d)
    if sstate_installpkg(shared_state, d):
        return
    msg = "No sstate archive obtainable, will run full task instead."
    bb.warn(msg)
    raise bb.BBHandledException(msg)
807
python sstate_task_prefunc () {
    # Before an sstate-capable task (re)runs, clean up whatever the previous
    # run of this task installed (see sstate_clean) so it starts fresh.
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"
813
python sstate_task_postfunc () {
    # After an sstate-capable task succeeds: run any intercept functions,
    # capture the task output into an sstate package, then install that
    # package so the build continues from the packaged state.
    shared_state = sstate_state_fromvars(d)

    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))

    # Force a 0o002 umask while packaging so archive contents end up
    # group-writable regardless of the umask the task ran with.
    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    # NOTE(review): sstateinst appears unused below — confirm before removing.
    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    # The staging area is no longer needed once packaged and installed.
    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"
834
835
836#
837# Shell function to generate a sstate package from a directory
838# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
839#
840sstate_create_package () {
841 # Exit early if it already exists
842 if [ -e ${SSTATE_PKG} ]; then
843 touch ${SSTATE_PKG} 2>/dev/null || true
844 return
845 fi
846
847 mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
848 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
849
850 OPT="-cS"
851 ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
852 # Use pzstd if available
853 if [ -x "$(command -v pzstd)" ]; then
854 ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
855 fi
856
857 # Need to handle empty directories
858 if [ "$(ls -A)" ]; then
859 set +e
860 tar -I "$ZSTD" $OPT -f $TFILE *
861 ret=$?
862 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
863 exit 1
864 fi
865 set -e
866 else
867 tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
868 fi
869 chmod 0664 $TFILE
870 # Skip if it was already created by some other process
871 if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
872 # There is a symbolic link, but it links to nothing.
873 # Forcefully replace it with the new file.
874 ln -f $TFILE ${SSTATE_PKG} || true
875 elif [ ! -e ${SSTATE_PKG} ]; then
876 # Move into place using ln to attempt an atomic op.
877 # Abort if it already exists
878 ln $TFILE ${SSTATE_PKG} || true
879 else
880 touch ${SSTATE_PKG} 2>/dev/null || true
881 fi
882 rm $TFILE
883}
884
python sstate_sign_package () {
    # Detach-sign the freshly created sstate archive with the configured
    # local GPG key, replacing any signature left by a previous run.
    from oe.gpg_sign import get_signer

    pkg = d.getVar('SSTATE_PKG')
    sigfile = pkg + '.sig'
    if os.path.exists(sigfile):
        os.unlink(sigfile)

    signer = get_signer(d, 'local')
    signer.detach_sign(pkg, d.getVar('SSTATE_SIG_KEY', False), None,
                       d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}
896
python sstate_report_unihash() {
    # Forward this task's unihash to the signature generator, when the
    # active generator supports unihash reporting.
    reporter = getattr(bb.parse.siggen, 'report_unihash', None)
    if not reporter:
        return

    ss = sstate_state_fromvars(d)
    reporter(os.getcwd(), ss['task'], d)
}
904
905#
906# Shell function to decompress and prepare a package for installation
907# Will be run from within SSTATE_INSTDIR.
908#
909sstate_unpack_package () {
910 ZSTD="zstd -T${ZSTD_THREADS}"
911 # Use pzstd if available
912 if [ -x "$(command -v pzstd)" ]; then
913 ZSTD="pzstd -p ${ZSTD_THREADS}"
914 fi
915
916 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
917 # update .siginfo atime on local/NFS mirror if it is a symbolic link
918 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
919 # update each symbolic link instead of any referenced file
920 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
921 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
922 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
923}
924
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    """Determine which tasks in sq_data have a usable sstate object.

    Checks the local SSTATE_DIR first, then (when SSTATE_MIRRORS is set)
    probes the mirrors with a pool of fetcher threads. Returns the set of
    task ids whose sstate object was found; callers treat the rest as
    tasks that must run in full.
    """
    found = set()
    missed = set()

    def gethash(task):
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        # Shared-work tasks use the software-specific spec with no extra path
        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    def getsstatefile(tid, siginfo, d):
        spec, extrapath, tname = getpathcomponents(tid, d)
        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)

    # Pass 1: check the local sstate cache
    for tid in sq_data['hash']:

        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))

        if os.path.exists(sstatefile):
            found.add(tid)
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    foundLocal = len(found)
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init():
            while not connection_cache_pool.full():
                connection_cache_pool.put(FetchConnectionCache())

        def checkstatus_end():
            while not connection_cache_pool.empty():
                connection_cache = connection_cache_pool.get()
                connection_cache.close_connections()

        import threading
        # Progress accounting shared by the worker threads. The previous
        # code referenced thread_worker.tasks.qsize() here — a leftover
        # from the old ThreadedPool implementation. Under
        # ThreadPoolExecutor that name does not exist and raised
        # NameError (silently, as the executor.map() iterator was never
        # consumed), so no progress was ever reported.
        progress_lock = threading.Lock()
        checked = 0

        def checkstatus(arg):
            (tid, sstatefile) = arg
            nonlocal checked

            connection_cache = connection_cache_pool.get()
            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            import traceback

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                        connection_cache=connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                # set.add/remove are performed under the GIL; tids are
                # unique per worker so these don't race with each other.
                found.add(tid)
                missed.remove(tid)
            except bb.fetch2.FetchError as e:
                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
            except Exception as e:
                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))

            connection_cache_pool.put(connection_cache)

            if progress:
                with progress_lock:
                    checked += 1
                    done = checked
                bb.event.fire(bb.event.ProcessProgress(msg, done), d)

        tasklist = []
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))

            # Only emit progress events for larger mirror scans
            progress = len(tasklist) >= 100
            if progress:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            # Have to setup the fetcher environment here rather than in each thread as it would race
            fetcherenv = bb.fetch2.get_fetcher_environment(d)
            with bb.utils.environment(**fetcherenv):
                bb.event.enable_threadlock()
                import concurrent.futures
                from queue import Queue
                connection_cache_pool = Queue(nproc)
                checkstatus_init()
                with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
                    # Consume the iterator so exceptions raised inside the
                    # workers surface here instead of being discarded.
                    for _ in executor.map(checkstatus, tasklist.copy()):
                        pass
                checkstatus_end()
                bb.event.disable_threadlock()

            if progress:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        # Feed toaster's UI with per-task hit/miss details
        evdata = {'missed': [], 'found': []}
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        for tid in found:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
            (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
# The compiled-regex cache (_SSTATE_EXCLUDEDEPS_SYSROOT) must not feed into
# the task signature.
setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"

BB_SETSCENE_DEPVALID = "setscene_depvalid"

def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    """Decide whether a setscene task's dependency can be skipped.

    taskdependees is a dict of tasks which depend on task, each being a
    3 item list of [PN, TASKNAME, FILENAME]; task is included in
    taskdependees too.

    Return - False - We need this dependency
           - True - We can skip this dependency
    """
    import re

    def logit(msg, log):
        # Collect decisions into 'log' when provided, else emit debug output
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    # Tasks which are only ever needed by their direct dependents
    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]

    def isNativeCross(x):
        # Recipe-name heuristic for native/cross/crosssdk recipes
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # We only need to trigger deploy_source_date_epoch through direct dependencies
    if taskdependees[task][1] in directtasks:
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    # Walk every dependent; any dependent that genuinely needs this task
    # short-circuits with False. Only if all are skippable do we return True.
    for dep in taskdependees:
        logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package doesn't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # This is due to the [depends] in useradd.bbclass complicating matters
        # The logic *is* reversed here due to the way hard setscene dependencies are injected
        if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[dep][1] in directtasks:
            continue

        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True
1185
addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    # When a task succeeds without having written an sstate package
    # (SSTATE_CURRTASK unset), make sure a .siginfo file exists for it, or
    # refresh the existing one's timestamp.
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        taskname = d.getVar("BB_RUNTASK")[3:]
        swspec = d.getVar('SSTATE_SWSPEC')
        # Shared-work tasks use the software-specific spec with no extra path
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            try:
                os.utime(siginfo, None)
            except PermissionError:
                pass
            except OSError as err:
                # Named 'err' rather than 'e': an 'except ... as e' clause
                # would unbind the handler's event variable 'e' afterwards.
                # Handle read-only file systems gracefully
                import errno
                if err.errno != errno.EROFS:
                    raise

}
1214
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"

#
# Event handler which removes manifests and stamps file for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed so recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit where recipes
# became skipped would be another example.
#
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler_reachablestamps
sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler_reachablestamps() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            # NOTE(review): readlines() keeps trailing newlines, while the
            # stamps compared against below come from split() and have none —
            # 'stamp in preservestamps' may therefore never match. Confirm
            # how preserve-stamps is written before relying on this.
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        manseen = set()
        ignore = []
        with open(i, "r") as f:
            lines = f.readlines()
            # Walk newest-first so only the latest entry per manifest counts
            for l in reversed(lines):
                try:
                    (stamp, manifest, workdir) = l.split()
                    # The index may have multiple entries for the same manifest as the code above only appends
                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
                    # The last entry in the list is the valid one, any earlier entries with matching manifests
                    # should be ignored.
                    if manifest in manseen:
                        ignore.append(l)
                        continue
                    manseen.add(manifest)
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    # .postrm manifests are kept for package removal hooks
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        # Rewrite the index without the removed and duplicate entries
        with open(i, "w") as f:
            for l in lines:
                if l in ignore:
                    continue
                f.write(l)
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}
1310
1311
1312#
1313# Bitbake can generate an event showing which setscene tasks are 'stale',
1314# i.e. which ones will be rerun. These are ones where a stamp file is present but
1315# it is stable (e.g. taskhash doesn't match). With that list we can go through
1316# the manifests for matching tasks and "uninstall" those manifests now. We do
1317# this now rather than mid build since the distribution of files between sstate
1318# objects may have changed, new tasks may run first and if those new tasks overlap
1319# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1320# removing these files is fast.
1321#
1322addhandler sstate_eventhandler_stalesstate
1323sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1324python sstate_eventhandler_stalesstate() {
1325 d = e.data
1326 tasks = e.tasks
1327
1328 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1329
1330 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1331 toremove = []
1332 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1333 if not os.path.exists(i):
1334 continue
1335 with open(i, "r") as f:
1336 lines = f.readlines()
1337 for l in lines:
1338 try:
1339 (stamp, manifest, workdir) = l.split()
1340 for tid in tasks:
1341 for s in tasks[tid]:
1342 if s.startswith(stamp):
1343 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1344 manname = manifest + "." + taskname
1345 if os.path.exists(manname):
1346 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1347 toremove.append((manname, tid, tasks[tid]))
1348 break
1349 except ValueError:
1350 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1351
1352 if toremove:
1353 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1354 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1355
1356 removed = 0
1357 for (manname, tid, stamps) in toremove:
1358 sstate_clean_manifest(manname, d)
1359 for stamp in stamps:
1360 bb.utils.remove(stamp)
1361 removed = removed + 1
1362 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1363
1364 bb.event.fire(bb.event.ProcessFinished(msg), d)
1365}