#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "10"
8
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
14def generate_sstatefn(spec, hash, taskname, siginfo, d):
15 if taskname is None:
16 return ""
17 extension = ".tar.zst"
18 # 8 chars reserved for siginfo
19 limit = 254 - 8
20 if siginfo:
21 limit = 254
22 extension = ".tar.zst.siginfo"
23 if not hash:
24 hash = "INVALID"
25 fn = spec + hash + "_" + taskname + extension
26 # If the filename is too long, attempt to reduce it
27 if len(fn) > limit:
28 components = spec.split(":")
29 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
30 # 7 is for the separators
31 avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
32 components[2] = components[2][:avail]
33 components[3] = components[3][:avail]
34 components[4] = components[4][:avail]
35 spec = ":".join(components)
36 fn = spec + hash + "_" + taskname + extension
37 if len(fn) > limit:
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
39 return hash[:2] + "/" + hash[2:4] + "/" + fn
40
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
49
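# For reference, a minimal sketch of how the pieces above combine on disk
# (recipe, version and hash values below are hypothetical):
#
#   SSTATE_PKGSPEC -> "sstate:zlib:core2-64-poky-linux:1.2.13:r0:core2-64:10:"
#   BB_UNIHASH     -> "4f3a...e9"  (the task's unihash)
#   SSTATE_PKGNAME -> "4f/3a/sstate:zlib:core2-64-poky-linux:1.2.13:r0:core2-64:10:4f3a...e9_populate_sysroot.tar.zst"
#   SSTATE_PKG     -> "${SSTATE_DIR}/" + SSTATE_PKGNAME
#
# generate_sstatefn() adds the two-level hash prefix (hash[:2]/hash[2:4]/) so the
# cache is spread over subdirectories, and truncates the informational arch/PV/PR
# fields if the resulting file name would exceed the filesystem name length limit.
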
# Explicitly make PV depend on the evaluated value of the PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
# of the system; we let the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
59SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
60# Avoid docbook/sgml catalog warnings for now
61SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
62# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
63SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
64SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy files overlap because allarch is disabled when multilib is used
66SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
67# Archive the sources for many architectures in one deploy folder
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
69# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
72SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
73SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
74
75SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
76SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
77SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
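
# With the default SSTATE_SCAN_FILES above, SSTATE_SCAN_CMD expands to roughly:
#
#   find ${SSTATE_BUILDDIR} \( -name "*.la" -o -name "*-config" \
#        -o -name "*_config" -o -name "postinst-*" \) -type f
#
# i.e. it selects the files most likely to contain hardcoded build paths, which
# sstate_hardcode_path() below rewrites into relocatable FIXME placeholders.
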
78SSTATE_HASHEQUIV_FILEMAP ?= " \
79 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
80 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
81 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
82 populate_sysroot:*/crossscripts/*:${TMPDIR} \
83 populate_sysroot:*/crossscripts/*:${COREBASE} \
84 "
85
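# Each SSTATE_HASHEQUIV_FILEMAP entry above is colon-separated:
#
#   <sstate task>:<file glob within the task output>:<content to mask out>
#
# where the last field is either a path prefix (e.g. ${TMPDIR}) or, with the
# "regex-" prefix, a regular expression. Roughly speaking, matching content is
# masked out before the output hash is computed so that build-host specific
# strings (such as those in postinst-useradd-* scripts) do not defeat hash
# equivalence; see oe.sstatesig for the exact handling.
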
86BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
87
88SSTATE_ARCHS = " \
89 ${BUILD_ARCH} \
90 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
91 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_OS} \
93 ${SDK_ARCH}_${PACKAGE_ARCH} \
94 allarch \
95 ${PACKAGE_ARCH} \
96 ${PACKAGE_EXTRA_ARCHS} \
97 ${MACHINE_ARCH}"
98SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
99
100SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
101
102SSTATECREATEFUNCS += "sstate_hardcode_path"
103SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
104SSTATEPOSTCREATEFUNCS = ""
105SSTATEPREINSTFUNCS = ""
106SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
107SSTATEPOSTINSTFUNCS = ""
108EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
109
110# Check whether sstate exists for tasks that support sstate and are in the
111# locked signatures file.
112SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
113
114# Check whether the task's computed hash matches the task's hash in the
115# locked signatures file.
116SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
117
118# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
119# not sign)
120SSTATE_SIG_KEY ?= ""
121SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
123SSTATE_VERIFY_SIG ?= "0"
124# List of signatures to consider valid.
125SSTATE_VALID_SIGS ??= ""
126SSTATE_VALID_SIGS[vardepvalue] = ""
127
128SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
129SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
130 the output hash for a task, which in turn is used to determine equivalency. \
131 "
132
133SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
134SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
135 hash equivalency server, such as PN, PV, taskname, etc. This information \
136 is very useful for developers looking at task data, but may leak sensitive \
137 data if the equivalence server is public. \
138 "
139
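# Hash equivalence only takes effect when a signature handler and hash server
# are configured, for example (illustrative local.conf snippet):
#
#   BB_SIGNATURE_HANDLER = "OEEquivHash"
#   BB_HASHSERVE = "auto"
#
# With that in place, tasks report the output hash computed by
# SSTATE_HASHEQUIV_METHOD and identical outputs can be treated as equivalent
# even when the task input hashes differ.
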
140python () {
141 if bb.data.inherits_class('native', d):
142 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
143 elif bb.data.inherits_class('crosssdk', d):
144 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
145 elif bb.data.inherits_class('cross', d):
146 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
147 elif bb.data.inherits_class('nativesdk', d):
148 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
149 elif bb.data.inherits_class('cross-canadian', d):
150 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
151 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
152 d.setVar('SSTATE_PKGARCH', "allarch")
153 else:
154 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
155
156 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
157 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
158 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
159 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
160
161 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
162 d.setVar('SSTATETASKS', " ".join(unique_tasks))
163 for task in unique_tasks:
164 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
165 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
166 d.setVarFlag(task, 'network', '1')
167 d.setVarFlag(task + "_setscene", 'network', '1')
168}
169
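# As an illustration of the mapping above (concrete values are hypothetical and
# depend on host and target): on an x86_64 build host a -native recipe ends up
# with SSTATE_PKGARCH = "x86_64" (BUILD_ARCH), a nativesdk recipe with
# "${SDK_ARCH}_${SDK_OS}", an allarch recipe with "allarch", and a normal target
# recipe keeps its PACKAGE_ARCH (for example "core2-64"), which is also used for
# the manifest index via SSTATE_MANMACH. native/cross/crosssdk recipes are
# additionally placed under the NATIVELSBSTRING subdirectory via SSTATE_EXTRAPATH
# so that objects from different host distributions are kept apart.
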
170def sstate_init(task, d):
171 ss = {}
172 ss['task'] = task
173 ss['dirs'] = []
174 ss['plaindirs'] = []
175 ss['lockfiles'] = []
176 ss['lockfiles-shared'] = []
177 return ss
178
179def sstate_state_fromvars(d, task = None):
180 if task is None:
181 task = d.getVar('BB_CURRENTTASK')
182 if not task:
183 bb.fatal("sstate code running without task context?!")
184 task = task.replace("_setscene", "")
185
186 if task.startswith("do_"):
187 task = task[3:]
188 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
189 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
190 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
191 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
192 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
193 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
194 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
195 if not task or len(inputs) != len(outputs):
196 bb.fatal("sstate variables not setup correctly?!")
197
198 if task == "populate_lic":
199 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
200 d.setVar("SSTATE_EXTRAPATH", "")
201 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
202
203 ss = sstate_init(task, d)
204 for i in range(len(inputs)):
205 sstate_add(ss, inputs[i], outputs[i], d)
206 ss['lockfiles'] = lockfiles
207 ss['lockfiles-shared'] = lockfilesshared
208 ss['plaindirs'] = plaindirs
209 ss['interceptfuncs'] = interceptfuncs
210 ss['fixmedir'] = fixmedir
211 return ss
212
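# The varflags read above are declared by each sstate-capable task. A typical
# setup (modelled on deploy.bbclass, shown purely as an illustration) looks like:
#
#   SSTATETASKS += "do_deploy"
#   do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
#   do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
#
#   python do_deploy_setscene () {
#       sstate_setscene(d)
#   }
#   addtask do_deploy_setscene
#
# i.e. the task populates the input directory and the sstate machinery installs
# (and archives) that content into the corresponding output directory.
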
213def sstate_add(ss, source, dest, d):
214 if not source.endswith("/"):
215 source = source + "/"
216 if not dest.endswith("/"):
217 dest = dest + "/"
218 source = os.path.normpath(source)
219 dest = os.path.normpath(dest)
220 srcbase = os.path.basename(source)
221 ss['dirs'].append([srcbase, source, dest])
222 return ss
223
224def sstate_install(ss, d):
225 import oe.path
226 import oe.sstatesig
227 import subprocess
228
229 sharedfiles = []
230 shareddirs = []
231 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
232
233 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
234
235 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
236
237 if os.access(manifest, os.R_OK):
238 bb.fatal("Package already staged (%s)?!" % manifest)
239
240 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
241
242 locks = []
243 for lock in ss['lockfiles-shared']:
244 locks.append(bb.utils.lockfile(lock, True))
245 for lock in ss['lockfiles']:
246 locks.append(bb.utils.lockfile(lock))
247
248 for state in ss['dirs']:
249 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
250 for walkroot, dirs, files in os.walk(state[1]):
251 for file in files:
252 srcpath = os.path.join(walkroot, file)
253 dstpath = srcpath.replace(state[1], state[2])
254 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
255 sharedfiles.append(dstpath)
256 for dir in dirs:
257 srcdir = os.path.join(walkroot, dir)
258 dstdir = srcdir.replace(state[1], state[2])
259 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
260 if os.path.islink(srcdir):
261 sharedfiles.append(dstdir)
262 continue
263 if not dstdir.endswith("/"):
264 dstdir = dstdir + "/"
265 shareddirs.append(dstdir)
266
267 # Check the file list for conflicts against files which already exist
268 overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
269 match = []
270 for f in sharedfiles:
271 if os.path.exists(f) and not os.path.islink(f):
272 f = os.path.normpath(f)
273 realmatch = True
274 for w in overlap_allowed:
275 w = os.path.normpath(w)
276 if f.startswith(w):
277 realmatch = False
278 break
279 if realmatch:
280 match.append(f)
281 sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
282 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
283 if search_output:
284 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
285 else:
286 match.append(" (not matched to any task)")
287 if match:
288 bb.error("The recipe %s is trying to install files into a shared " \
289 "area when those files already exist. Those files and their manifest " \
290 "location are:\n %s\nPlease verify which recipe should provide the " \
291 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
292 "break things - if not now, possibly in the future (we've seen builds fail " \
293 "several months later). If the system knew how to recover from this " \
294 "automatically it would, however there are several different scenarios " \
295 "which can result in this and we don't know which one this is. It may be " \
296 "you have switched providers of something like virtual/kernel (e.g. from " \
297 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
298 "clean task for both recipes and it will resolve this error. It may be " \
299 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
300 "those recipes should again resolve this error, however switching " \
301 "DISTRO_FEATURES on an existing build directory is not supported - you " \
302 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
303 "It could be the overlapping files detected are harmless in which case " \
304 "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
305 "also be your build is including two different conflicting versions of " \
          "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
307 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
308 "sharing the error and filelist above." % \
309 (d.getVar('PN'), "\n ".join(match)))
310 bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
311
312 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
313 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
314 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
315
316 # Write out the manifest
317 f = open(manifest, "w")
318 for file in sharedfiles:
319 f.write(file + "\n")
320
321 # We want to ensure that directories appear at the end of the manifest
322 # so that when we test to see if they should be deleted any contents
323 # added by the task will have been removed first.
324 dirs = sorted(shareddirs, key=len)
325 # Must remove children first, which will have a longer path than the parent
326 for di in reversed(dirs):
327 f.write(di + "\n")
328 f.close()
329
330 # Append to the list of manifests for this PACKAGE_ARCH
331
332 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
333 l = bb.utils.lockfile(i + ".lock")
334 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
335 manifests = []
336 if os.path.exists(i):
337 with open(i, "r") as f:
338 manifests = f.readlines()
339 # We append new entries, we don't remove older entries which may have the same
340 # manifest name but different versions from stamp/workdir. See below.
341 if filedata not in manifests:
342 with open(i, "a+") as f:
343 f.write(filedata)
344 bb.utils.unlockfile(l)
345
346 # Run the actual file install
347 for state in ss['dirs']:
348 if os.path.exists(state[1]):
349 oe.path.copyhardlinktree(state[1], state[2])
350
351 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
352 # All hooks should run in the SSTATE_INSTDIR
353 bb.build.exec_func(postinst, d, (sstateinst,))
354
355 for lock in locks:
356 bb.utils.unlockfile(lock)
357
sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
359sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
360
361def sstate_installpkg(ss, d):
362 from oe.gpg_sign import get_signer
363
364 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
365 d.setVar("SSTATE_CURRTASK", ss['task'])
366 sstatefetch = d.getVar('SSTATE_PKGNAME')
367 sstatepkg = d.getVar('SSTATE_PKG')
368
369 if not os.path.exists(sstatepkg):
370 pstaging_fetch(sstatefetch, d)
371
372 if not os.path.isfile(sstatepkg):
373 bb.note("Sstate package %s does not exist" % sstatepkg)
374 return False
375
376 sstate_clean(ss, d)
377
378 d.setVar('SSTATE_INSTDIR', sstateinst)
379
380 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
381 if not os.path.isfile(sstatepkg + '.sig'):
382 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
383 return False
384 signer = get_signer(d, 'local')
385 if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
386 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
387 return False
388
    # Empty sstateinst directory, ensure it's clean
390 if os.path.exists(sstateinst):
391 oe.path.remove(sstateinst)
392 bb.utils.mkdirhier(sstateinst)
393
394 sstateinst = d.getVar("SSTATE_INSTDIR")
395 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
396
397 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
398 # All hooks should run in the SSTATE_INSTDIR
399 bb.build.exec_func(f, d, (sstateinst,))
400
401 return sstate_installpkgdir(ss, d)
402
403def sstate_installpkgdir(ss, d):
404 import oe.path
405 import subprocess
406
407 sstateinst = d.getVar("SSTATE_INSTDIR")
408 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
409
410 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
411 # All hooks should run in the SSTATE_INSTDIR
412 bb.build.exec_func(f, d, (sstateinst,))
413
414 def prepdir(dir):
415 # remove dir if it exists, ensure any parent directories do exist
416 if os.path.exists(dir):
417 oe.path.remove(dir)
418 bb.utils.mkdirhier(dir)
419 oe.path.remove(dir)
420
421 for state in ss['dirs']:
422 prepdir(state[1])
423 bb.utils.rename(sstateinst + state[0], state[1])
424 sstate_install(ss, d)
425
426 for plain in ss['plaindirs']:
427 workdir = d.getVar('WORKDIR')
428 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
429 src = sstateinst + "/" + plain.replace(workdir, '')
430 if sharedworkdir in plain:
431 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
432 dest = plain
433 bb.utils.mkdirhier(src)
434 prepdir(dest)
435 bb.utils.rename(src, dest)
436
437 return True
438
439python sstate_hardcode_path_unpack () {
440 # Fixup hardcoded paths
441 #
442 # Note: The logic below must match the reverse logic in
443 # sstate_hardcode_path(d)
444 import subprocess
445
446 sstateinst = d.getVar('SSTATE_INSTDIR')
447 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
448 fixmefn = sstateinst + "fixmepath"
449 if os.path.isfile(fixmefn):
450 staging_target = d.getVar('RECIPE_SYSROOT')
451 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
452
453 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
454 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
455 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
456 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
457 else:
458 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
459
460 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
461 for fixmevar in extra_staging_fixmes.split():
462 fixme_path = d.getVar(fixmevar)
463 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
464
465 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
466 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
467
468 # Defer do_populate_sysroot relocation command
469 if sstatefixmedir:
470 bb.utils.mkdirhier(sstatefixmedir)
471 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
472 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
473 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
474 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
475 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
476 f.write(sstate_hardcode_cmd)
477 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
478 return
479
480 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
481 subprocess.check_call(sstate_hardcode_cmd, shell=True)
482
483 # Need to remove this or we'd copy it into the target directory and may
484 # conflict with another writer
485 os.remove(fixmefn)
486}
487
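# A sketch of the command assembled above for a plain target recipe (placeholders
# in angle brackets stand for the expanded SSTATE_INSTDIR / RECIPE_SYSROOT values):
#
#   sed -e 's:^:<SSTATE_INSTDIR>:g' <SSTATE_INSTDIR>fixmepath \
#       | xargs sed -i -e 's:FIXMESTAGINGDIRTARGET:<RECIPE_SYSROOT>:g'
#
# i.e. every file listed in fixmepath has its FIXME placeholders rewritten back to
# absolute paths valid on this build host. When SSTATE_FIXMEDIR is set the command
# is instead written to fixmepath.cmd using FIXMEFINALSSTATE* placeholders, deferring
# the relocation until the content is installed into its final location.
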
488def sstate_clean_cachefile(ss, d):
489 import oe.path
490
491 if d.getVarFlag('do_%s' % ss['task'], 'task'):
492 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
493 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
494 bb.note("Removing %s" % sstatepkgfile)
495 oe.path.remove(sstatepkgfile)
496
497def sstate_clean_cachefiles(d):
498 for task in (d.getVar('SSTATETASKS') or "").split():
499 ld = d.createCopy()
500 ss = sstate_state_fromvars(ld, task)
501 sstate_clean_cachefile(ss, ld)
502
503def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
504 import oe.path
505
506 mfile = open(manifest)
507 entries = mfile.readlines()
508 mfile.close()
509
510 for entry in entries:
511 entry = entry.strip()
512 if prefix and not entry.startswith("/"):
513 entry = prefix + "/" + entry
514 bb.debug(2, "Removing manifest: %s" % entry)
515 # We can race against another package populating directories as we're removing them
516 # so we ignore errors here.
517 try:
518 if entry.endswith("/"):
519 if os.path.islink(entry[:-1]):
520 os.remove(entry[:-1])
521 elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
522 # Removing directories whilst builds are in progress exposes a race. Only
523 # do it in contexts where it is safe to do so.
524 os.rmdir(entry[:-1])
525 else:
526 os.remove(entry)
527 except OSError:
528 pass
529
530 postrm = manifest + ".postrm"
531 if os.path.exists(manifest + ".postrm"):
532 import subprocess
533 os.chmod(postrm, 0o755)
534 subprocess.check_call(postrm, shell=True)
535 oe.path.remove(postrm)
536
537 oe.path.remove(manifest)
538
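# For illustration, the manifest consumed here is the one written by sstate_install():
# a plain newline-separated list of everything the task installed into the shared
# area, files first and directories (with a trailing "/") last, e.g. (hypothetical):
#
#   <sstate output dir>/usr/include/foo.h
#   <sstate output dir>/usr/lib/libfoo.so.1
#   <sstate output dir>/usr/lib/
#   <sstate output dir>/usr/include/
#
# Entries are removed one by one; directories are only removed when empty (and not
# at all when canrace is set), and any <manifest>.postrm hook is run afterwards.
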
539def sstate_clean(ss, d):
540 import oe.path
541 import glob
542
543 d2 = d.createCopy()
544 stamp_clean = d.getVar("STAMPCLEAN")
545 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
546 if extrainf:
547 d2.setVar("SSTATE_MANMACH", extrainf)
548 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
549 else:
550 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
551
552 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
553
554 if os.path.exists(manifest):
555 locks = []
556 for lock in ss['lockfiles-shared']:
557 locks.append(bb.utils.lockfile(lock))
558 for lock in ss['lockfiles']:
559 locks.append(bb.utils.lockfile(lock))
560
561 sstate_clean_manifest(manifest, d, canrace=True)
562
563 for lock in locks:
564 bb.utils.unlockfile(lock)
565
566 # Remove the current and previous stamps, but keep the sigdata.
567 #
568 # The glob() matches do_task* which may match multiple tasks, for
569 # example: do_package and do_package_write_ipk, so we need to
570 # exactly match *.do_task.* and *.do_task_setscene.*
571 rm_stamp = '.do_%s.' % ss['task']
572 rm_setscene = '.do_%s_setscene.' % ss['task']
573 # For BB_SIGNATURE_HANDLER = "noop"
574 rm_nohash = ".do_%s" % ss['task']
575 for stfile in glob.glob(wildcard_stfile):
576 # Keep the sigdata
577 if ".sigdata." in stfile or ".sigbasedata." in stfile:
578 continue
579 # Preserve taint files in the stamps directory
580 if stfile.endswith('.taint'):
581 continue
582 if rm_stamp in stfile or rm_setscene in stfile or \
583 stfile.endswith(rm_nohash):
584 oe.path.remove(stfile)
585
586sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
587
588CLEANFUNCS += "sstate_cleanall"
589
590python sstate_cleanall() {
591 bb.note("Removing shared state for package %s" % d.getVar('PN'))
592
593 manifest_dir = d.getVar('SSTATE_MANIFESTS')
594 if not os.path.exists(manifest_dir):
595 return
596
597 tasks = d.getVar('SSTATETASKS').split()
598 for name in tasks:
599 ld = d.createCopy()
600 shared_state = sstate_state_fromvars(ld, name)
601 sstate_clean(shared_state, ld)
602}
603
604python sstate_hardcode_path () {
605 import subprocess, platform
606
607 # Need to remove hardcoded paths and fix these when we install the
608 # staging packages.
609 #
610 # Note: the logic in this function needs to match the reverse logic
611 # in sstate_installpkg(ss, d)
612
613 staging_target = d.getVar('RECIPE_SYSROOT')
614 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
615 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
616
617 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
618 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
619 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
620 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
621 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
622 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
623 else:
624 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
625 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
626
627 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
628 for fixmevar in extra_staging_fixmes.split():
629 fixme_path = d.getVar(fixmevar)
630 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
631 sstate_grep_cmd += " -e '%s'" % (fixme_path)
632
633 fixmefn = sstate_builddir + "fixmepath"
634
635 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
636 sstate_filelist_cmd = "tee %s" % (fixmefn)
637
638 # fixmepath file needs relative paths, drop sstate_builddir prefix
639 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
640
641 xargs_no_empty_run_cmd = '--no-run-if-empty'
642 if platform.system() == 'Darwin':
643 xargs_no_empty_run_cmd = ''
644
645 # Limit the fixpaths and sed operations based on the initial grep search
646 # This has the side effect of making sure the vfs cache is hot
647 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
648
649 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
650 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
651
652 # If the fixmefn is empty, remove it..
653 if os.stat(fixmefn).st_size == 0:
654 os.remove(fixmefn)
655 else:
656 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
657 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
658}
659
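# Putting the pieces above together, the generated pipeline for a target recipe
# looks roughly like (paths abbreviated, values illustrative):
#
#   find ${SSTATE_BUILDDIR} \( -name "*.la" ... \) -type f \
#       | xargs grep -l -e '${RECIPE_SYSROOT}' -e '${RECIPE_SYSROOT_NATIVE}' \
#       | tee fixmepath \
#       | xargs --no-run-if-empty sed -i -e 's:${RECIPE_SYSROOT}:FIXMESTAGINGDIRTARGET:g' ...
#
# so only files that actually contain sysroot paths are rewritten, and the same
# file list (made relative to SSTATE_BUILDDIR) is kept in fixmepath for the
# reverse substitution performed at unpack time.
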
660def sstate_package(ss, d):
661 import oe.path
662 import time
663
664 tmpdir = d.getVar('TMPDIR')
665
666 fixtime = False
667 if ss['task'] == "package":
668 fixtime = True
669
670 def fixtimestamp(root, path):
671 f = os.path.join(root, path)
672 if os.lstat(f).st_mtime > sde:
673 os.utime(f, (sde, sde), follow_symlinks=False)
674
675 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
676 sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
677 d.setVar("SSTATE_CURRTASK", ss['task'])
678 bb.utils.remove(sstatebuild, recurse=True)
679 bb.utils.mkdirhier(sstatebuild)
680 for state in ss['dirs']:
681 if not os.path.exists(state[1]):
682 continue
683 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find absolute symlinks and error on them. We could attempt to relocate them, but it's
        # not clear what the symlink is relative to in this context. We could add that markup
        # to sstate tasks, but there aren't many of these so it is better to avoid them entirely.
687 for walkroot, dirs, files in os.walk(state[1]):
688 for file in files + dirs:
689 if fixtime:
690 fixtimestamp(walkroot, file)
691 srcpath = os.path.join(walkroot, file)
692 if not os.path.islink(srcpath):
693 continue
694 link = os.readlink(srcpath)
695 if not os.path.isabs(link):
696 continue
697 if not link.startswith(tmpdir):
698 continue
699 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
700 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
701 bb.utils.rename(state[1], sstatebuild + state[0])
702
703 workdir = d.getVar('WORKDIR')
704 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
705 for plain in ss['plaindirs']:
706 pdir = plain.replace(workdir, sstatebuild)
707 if sharedworkdir in plain:
708 pdir = plain.replace(sharedworkdir, sstatebuild)
709 bb.utils.mkdirhier(plain)
710 bb.utils.mkdirhier(pdir)
711 bb.utils.rename(plain, pdir)
712 if fixtime:
713 fixtimestamp(pdir, "")
714 for walkroot, dirs, files in os.walk(pdir):
715 for file in files + dirs:
716 fixtimestamp(walkroot, file)
717
718 d.setVar('SSTATE_BUILDDIR', sstatebuild)
719 d.setVar('SSTATE_INSTDIR', sstatebuild)
720
721 if d.getVar('SSTATE_SKIP_CREATION') == '1':
722 return
723
724 sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
725 if d.getVar('SSTATE_SIG_KEY'):
726 sstate_create_package.append('sstate_sign_package')
727
728 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
729 sstate_create_package + \
730 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
731 # All hooks should run in SSTATE_BUILDDIR.
732 bb.build.exec_func(f, d, (sstatebuild,))
733
734 # SSTATE_PKG may have been changed by sstate_report_unihash
735 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
736 if not os.path.exists(siginfo):
737 bb.siggen.dump_this_task(siginfo, d)
738 else:
739 try:
740 os.utime(siginfo, None)
741 except PermissionError:
742 pass
743 except OSError as e:
744 # Handle read-only file systems gracefully
745 import errno
746 if e.errno != errno.EROFS:
747 raise e
748
749 return
750
751sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
752
753def pstaging_fetch(sstatefetch, d):
754 import bb.fetch2
755
756 # Only try and fetch if the user has configured a mirror
757 mirrors = d.getVar('SSTATE_MIRRORS')
758 if not mirrors:
759 return
760
761 # Copy the data object and override DL_DIR and SRC_URI
762 localdata = bb.data.createCopy(d)
763
764 dldir = localdata.expand("${SSTATE_DIR}")
765 bb.utils.mkdirhier(dldir)
766
767 localdata.delVar('MIRRORS')
768 localdata.setVar('FILESPATH', dldir)
769 localdata.setVar('DL_DIR', dldir)
770 localdata.setVar('PREMIRRORS', mirrors)
771 localdata.setVar('SRCPV', d.getVar('SRCPV'))
772
773 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
774 # we'll want to allow network access for the current set of fetches.
775 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
776 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
777 localdata.delVar('BB_NO_NETWORK')
778
779 # Try a fetch from the sstate mirror, if it fails just return and
780 # we will build the package
781 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
782 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
783 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
784 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
785
786 for srcuri in uris:
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
789 try:
790 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
791 fetcher.checkstatus()
792 fetcher.download()
793
794 except bb.fetch2.BBFetchException:
795 pass
796
797pstaging_fetch[vardepsexclude] += "SRCPV"
798
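# pstaging_fetch() only does anything when the user has pointed SSTATE_MIRRORS at
# one or more mirrors, typically in local.conf, e.g. (illustrative values):
#
#   SSTATE_MIRRORS ?= "\
#       file://.* https://sstate.example.com/PATH;downloadfilename=PATH \
#       file://.* file:///some/shared/sstate-cache/PATH"
#
# PATH is substituted by the fetcher with the hash-prefixed file name generated
# above; a failed fetch is silently ignored and the task is simply built locally.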
799
800def sstate_setscene(d):
801 shared_state = sstate_state_fromvars(d)
802 accelerate = sstate_installpkg(shared_state, d)
803 if not accelerate:
804 msg = "No sstate archive obtainable, will run full task instead."
805 bb.warn(msg)
806 raise bb.BBHandledException(msg)
807
808python sstate_task_prefunc () {
809 shared_state = sstate_state_fromvars(d)
810 sstate_clean(shared_state, d)
811}
812sstate_task_prefunc[dirs] = "${WORKDIR}"
813
814python sstate_task_postfunc () {
815 shared_state = sstate_state_fromvars(d)
816
817 for intercept in shared_state['interceptfuncs']:
818 bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
819
820 omask = os.umask(0o002)
821 if omask != 0o002:
822 bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
823 sstate_package(shared_state, d)
824 os.umask(omask)
825
826 sstateinst = d.getVar("SSTATE_INSTDIR")
827 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
828
829 sstate_installpkgdir(shared_state, d)
830
831 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
832}
833sstate_task_postfunc[dirs] = "${WORKDIR}"
834
835
836#
837# Shell function to generate a sstate package from a directory
838# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
839#
840sstate_create_package () {
841 # Exit early if it already exists
842 if [ -e ${SSTATE_PKG} ]; then
843 touch ${SSTATE_PKG} 2>/dev/null || true
844 return
845 fi
846
847 mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
848 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
849
850 OPT="-cS"
851 ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
852 # Use pzstd if available
853 if [ -x "$(command -v pzstd)" ]; then
854 ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
855 fi
856
857 # Need to handle empty directories
858 if [ "$(ls -A)" ]; then
859 set +e
860 tar -I "$ZSTD" $OPT -f $TFILE *
861 ret=$?
862 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
863 exit 1
864 fi
865 set -e
866 else
867 tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
868 fi
869 chmod 0664 $TFILE
870 # Skip if it was already created by some other process
871 if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
872 # There is a symbolic link, but it links to nothing.
873 # Forcefully replace it with the new file.
874 ln -f $TFILE ${SSTATE_PKG} || true
875 elif [ ! -e ${SSTATE_PKG} ]; then
876 # Move into place using ln to attempt an atomic op.
877 # Abort if it already exists
878 ln $TFILE ${SSTATE_PKG} || true
879 else
880 touch ${SSTATE_PKG} 2>/dev/null || true
881 fi
882 rm $TFILE
883}
884
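# For debugging, an individual sstate object is just a zstd-compressed tarball and
# can be inspected by hand, e.g. (path hypothetical):
#
#   tar -I zstd -tvf sstate-cache/4f/3a/sstate:zlib:..._populate_sysroot.tar.zst
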
885python sstate_sign_package () {
886 from oe.gpg_sign import get_signer
887
888
889 signer = get_signer(d, 'local')
890 sstate_pkg = d.getVar('SSTATE_PKG')
891 if os.path.exists(sstate_pkg + '.sig'):
892 os.unlink(sstate_pkg + '.sig')
893 signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
894 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
895}
896
897python sstate_report_unihash() {
898 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
899
900 if report_unihash:
901 ss = sstate_state_fromvars(d)
902 report_unihash(os.getcwd(), ss['task'], d)
903}
904
905#
906# Shell function to decompress and prepare a package for installation
907# Will be run from within SSTATE_INSTDIR.
908#
909sstate_unpack_package () {
910 ZSTD="zstd -T${ZSTD_THREADS}"
911 # Use pzstd if available
912 if [ -x "$(command -v pzstd)" ]; then
913 ZSTD="pzstd -p ${ZSTD_THREADS}"
914 fi
915
916 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
917 # update .siginfo atime on local/NFS mirror if it is a symbolic link
918 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
919 # update each symbolic link instead of any referenced file
920 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
921 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
922 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
923}
924
925BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
926
927def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    import itertools

    found = set()
931 missed = set()
932
933 def gethash(task):
934 return sq_data['unihash'][task]
935
936 def getpathcomponents(task, d):
937 # Magic data from BB_HASHFILENAME
938 splithashfn = sq_data['hashfn'][task].split(" ")
939 spec = splithashfn[1]
940 if splithashfn[0] == "True":
941 extrapath = d.getVar("NATIVELSBSTRING") + "/"
942 else:
943 extrapath = ""
944
945 tname = bb.runqueue.taskname_from_tid(task)[3:]
946
947 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
948 spec = splithashfn[2]
949 extrapath = ""
950
951 return spec, extrapath, tname
952
953 def getsstatefile(tid, siginfo, d):
954 spec, extrapath, tname = getpathcomponents(tid, d)
955 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
956
957 for tid in sq_data['hash']:
958
959 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
960
961 if os.path.exists(sstatefile):
962 found.add(tid)
963 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
964 else:
965 missed.add(tid)
966 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
967
968 foundLocal = len(found)
969 mirrors = d.getVar("SSTATE_MIRRORS")
970 if mirrors:
971 # Copy the data object and override DL_DIR and SRC_URI
972 localdata = bb.data.createCopy(d)
973
974 dldir = localdata.expand("${SSTATE_DIR}")
975 localdata.delVar('MIRRORS')
976 localdata.setVar('FILESPATH', dldir)
977 localdata.setVar('DL_DIR', dldir)
978 localdata.setVar('PREMIRRORS', mirrors)
979
980 bb.debug(2, "SState using premirror of: %s" % mirrors)
981
982 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
983 # we'll want to allow network access for the current set of fetches.
984 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
985 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
986 localdata.delVar('BB_NO_NETWORK')
987
988 from bb.fetch2 import FetchConnectionCache
989 def checkstatus_init():
990 while not connection_cache_pool.full():
991 connection_cache_pool.put(FetchConnectionCache())
992
993 def checkstatus_end():
994 while not connection_cache_pool.empty():
995 connection_cache = connection_cache_pool.get()
996 connection_cache.close_connections()
997
998 def checkstatus(arg):
999 (tid, sstatefile) = arg
1000
1001 connection_cache = connection_cache_pool.get()
1002 localdata2 = bb.data.createCopy(localdata)
1003 srcuri = "file://" + sstatefile
1004 localdata2.setVar('SRC_URI', srcuri)
1005 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
1006
1007 import traceback
1008
1009 try:
1010 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
1011 connection_cache=connection_cache)
1012 fetcher.checkstatus()
1013 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1014 found.add(tid)
1015 missed.remove(tid)
1016 except bb.fetch2.FetchError as e:
1017 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1018 except Exception as e:
1019 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1020
1021 connection_cache_pool.put(connection_cache)
1022
1023 if progress:
                bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)

1026 tasklist = []
1027 for tid in missed:
1028 sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1029 tasklist.append((tid, sstatefile))
1030
1031 if tasklist:
1032 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1033
            ## thread-safe counter
            cnt_tasks_done = itertools.count(start = 1)
            progress = len(tasklist) >= 100
1037 if progress:
1038 msg = "Checking sstate mirror object availability"
1039 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1040
1041 # Have to setup the fetcher environment here rather than in each thread as it would race
1042 fetcherenv = bb.fetch2.get_fetcher_environment(d)
1043 with bb.utils.environment(**fetcherenv):
1044 bb.event.enable_threadlock()
1045 import concurrent.futures
1046 from queue import Queue
1047 connection_cache_pool = Queue(nproc)
1048 checkstatus_init()
1049 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
1050 executor.map(checkstatus, tasklist.copy())
1051 checkstatus_end()
1052 bb.event.disable_threadlock()
1053
1054 if progress:
1055 bb.event.fire(bb.event.ProcessFinished(msg), d)
1056
1057 inheritlist = d.getVar("INHERIT")
1058 if "toaster" in inheritlist:
1059 evdata = {'missed': [], 'found': []};
1060 for tid in missed:
1061 sstatefile = d.expand(getsstatefile(tid, False, d))
1062 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1063 for tid in found:
1064 sstatefile = d.expand(getsstatefile(tid, False, d))
1065 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1066 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1067
1068 if summary:
1069 # Print some summary statistics about the current task completion and how much sstate
1070 # reuse there was. Avoid divide by zero errors.
1071 total = len(sq_data['hash'])
1072 complete = 0
1073 if currentcount:
1074 complete = (len(found) + currentcount) / (total + currentcount) * 100
1075 match = 0
1076 if total:
1077 match = len(found) / total * 100
1078 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1079 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1080
1081 if hasattr(bb.parse.siggen, "checkhashes"):
1082 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1083
1084 return found
1085setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
1086
1087BB_SETSCENE_DEPVALID = "setscene_depvalid"
1088
1089def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1090 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1091 # task is included in taskdependees too
1092 # Return - False - We need this dependency
1093 # - True - We can skip this dependency
1094 import re
1095
1096 def logit(msg, log):
1097 if log is not None:
1098 log.append(msg)
1099 else:
1100 bb.debug(2, msg)
1101
1102 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1103
    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]

1106 def isNativeCross(x):
1107 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1108
1109 # We only need to trigger deploy_source_date_epoch through direct dependencies
1110 if taskdependees[task][1] in directtasks:
1111 return True
1112
1113 # We only need to trigger packagedata through direct dependencies
1114 # but need to preserve packagedata on packagedata links
1115 if taskdependees[task][1] == "do_packagedata":
1116 for dep in taskdependees:
1117 if taskdependees[dep][1] == "do_packagedata":
1118 return False
1119 return True
1120
1121 for dep in taskdependees:
1122 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1123 if task == dep:
1124 continue
1125 if dep in notneeded:
1126 continue
        # do_package_write_* and do_package don't need do_package
1128 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1129 continue
1130 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1131 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1132 return False
1133 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1134 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1135 continue
1136 # Native/Cross packages don't exist and are noexec anyway
1137 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1138 continue
1139
1140 # This is due to the [depends] in useradd.bbclass complicating matters
1141 # The logic *is* reversed here due to the way hard setscene dependencies are injected
1142 if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1143 continue
1144
1145 # Consider sysroot depending on sysroot tasks
1146 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs one of these,
            # it should add a specific dependency itself, rather than relying on one of its
            # dependees to pull them in.
1150 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
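            # Judging from the matching below, each space-separated entry has the form
            # "<dependent recipe regex>-><dependency recipe regex>", e.g. a hypothetical
            # "foo-native->bar-native" entry means foo-native's sysroot task does not, by
            # itself, require bar-native's do_populate_sysroot to be installed.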
1151 not_needed = False
1152 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1153 if excludedeps is None:
1154 # Cache the regular expressions for speed
1155 excludedeps = []
1156 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1157 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1158 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1159 for excl in excludedeps:
1160 if excl[0].match(taskdependees[dep][0]):
1161 if excl[1].match(taskdependees[task][0]):
1162 not_needed = True
1163 break
1164 if not_needed:
1165 continue
1166 # For meta-extsdk-toolchain we want all sysroot dependencies
1167 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1168 return False
1169 # Native/Cross populate_sysroot need their dependencies
1170 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1171 return False
1172 # Target populate_sysroot depended on by cross tools need to be installed
1173 if isNativeCross(taskdependees[dep][0]):
1174 return False
1175 # Native/cross tools depended upon by target sysroot are not needed
1176 # Add an exception for shadow-native as required by useradd.bbclass
1177 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1178 continue
1179 # Target populate_sysroot need their dependencies
1180 return False
1181
1182 if taskdependees[dep][1] in directtasks:
1183 continue
1184
1185 # Safe fallthrough default
1186 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1187 return False
1188 return True
1189
1190addhandler sstate_eventhandler
1191sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1192python sstate_eventhandler() {
1193 d = e.data
1194 writtensstate = d.getVar('SSTATE_CURRTASK')
1195 if not writtensstate:
1196 taskname = d.getVar("BB_RUNTASK")[3:]
1197 spec = d.getVar('SSTATE_PKGSPEC')
1198 swspec = d.getVar('SSTATE_SWSPEC')
1199 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1200 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1201 d.setVar("SSTATE_EXTRAPATH", "")
1202 d.setVar("SSTATE_CURRTASK", taskname)
1203 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1204 if not os.path.exists(siginfo):
1205 bb.siggen.dump_this_task(siginfo, d)
1206 else:
1207 try:
1208 os.utime(siginfo, None)
1209 except PermissionError:
1210 pass
1211 except OSError as e:
1212 # Handle read-only file systems gracefully
1213 import errno
1214 if e.errno != errno.EROFS:
1215 raise e
1216
1217}
1218
1219SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1220
1221#
# Event handler which removes manifests and stamp files for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed; recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit where recipes
1226# became skipped would be another example.
1227#
1228# Also optionally removes the workdir of those tasks/recipes
1229#
1230addhandler sstate_eventhandler_reachablestamps
1231sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1232python sstate_eventhandler_reachablestamps() {
1233 import glob
1234 d = e.data
1235 stamps = e.stamps.values()
1236 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1237 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1238 preservestamps = []
1239 if os.path.exists(preservestampfile):
1240 with open(preservestampfile, 'r') as f:
1241 preservestamps = f.readlines()
1242 seen = []
1243
1244 # The machine index contains all the stamps this machine has ever seen in this build directory.
1245 # We should only remove things which this machine once accessed but no longer does.
1246 machineindex = set()
1247 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1248 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1249 if os.path.exists(mi):
1250 with open(mi, "r") as f:
1251 machineindex = set(line.strip() for line in f.readlines())
1252
1253 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1254 toremove = []
1255 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1256 if not os.path.exists(i):
1257 continue
1258 manseen = set()
1259 ignore = []
1260 with open(i, "r") as f:
1261 lines = f.readlines()
1262 for l in reversed(lines):
1263 try:
1264 (stamp, manifest, workdir) = l.split()
1265 # The index may have multiple entries for the same manifest as the code above only appends
1266 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1267 # The last entry in the list is the valid one, any earlier entries with matching manifests
1268 # should be ignored.
1269 if manifest in manseen:
1270 ignore.append(l)
1271 continue
1272 manseen.add(manifest)
1273 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1274 toremove.append(l)
1275 if stamp not in seen:
1276 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1277 seen.append(stamp)
1278 except ValueError:
1279 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1280
1281 if toremove:
1282 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1283 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1284
1285 removed = 0
1286 for r in toremove:
1287 (stamp, manifest, workdir) = r.split()
1288 for m in glob.glob(manifest + ".*"):
1289 if m.endswith(".postrm"):
1290 continue
1291 sstate_clean_manifest(m, d)
1292 bb.utils.remove(stamp + "*")
1293 if removeworkdir:
1294 bb.utils.remove(workdir, recurse = True)
1295 lines.remove(r)
1296 removed = removed + 1
1297 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1298
1299 bb.event.fire(bb.event.ProcessFinished(msg), d)
1300
1301 with open(i, "w") as f:
1302 for l in lines:
1303 if l in ignore:
1304 continue
1305 f.write(l)
1306 machineindex |= set(stamps)
1307 with open(mi, "w") as f:
1308 for l in machineindex:
1309 f.write(l + "\n")
1310
1311 if preservestamps:
1312 os.remove(preservestampfile)
1313}
1314
1315
1316#
1317# Bitbake can generate an event showing which setscene tasks are 'stale',
1318# i.e. which ones will be rerun. These are ones where a stamp file is present but
# it is stale (e.g. the taskhash doesn't match). With that list we can go through
1320# the manifests for matching tasks and "uninstall" those manifests now. We do
1321# this now rather than mid build since the distribution of files between sstate
1322# objects may have changed, new tasks may run first and if those new tasks overlap
1323# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1324# removing these files is fast.
1325#
1326addhandler sstate_eventhandler_stalesstate
1327sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1328python sstate_eventhandler_stalesstate() {
1329 d = e.data
1330 tasks = e.tasks
1331
1332 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1333
1334 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1335 toremove = []
1336 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1337 if not os.path.exists(i):
1338 continue
1339 with open(i, "r") as f:
1340 lines = f.readlines()
1341 for l in lines:
1342 try:
1343 (stamp, manifest, workdir) = l.split()
1344 for tid in tasks:
1345 for s in tasks[tid]:
1346 if s.startswith(stamp):
1347 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1348 manname = manifest + "." + taskname
1349 if os.path.exists(manname):
1350 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1351 toremove.append((manname, tid, tasks[tid]))
1352 break
1353 except ValueError:
1354 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1355
1356 if toremove:
1357 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1358 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1359
1360 removed = 0
1361 for (manname, tid, stamps) in toremove:
1362 sstate_clean_manifest(manname, d)
1363 for stamp in stamps:
1364 bb.utils.remove(stamp)
1365 removed = removed + 1
1366 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1367
1368 bb.event.fire(bb.event.ProcessFinished(msg), d)
1369}