1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "11"
8
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
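# Each sstate-enabled task records the files it installs in a manifest named
# ${SSTATE_MANFILEPREFIX}.<task>; sstate_install() below writes these and the
# cleaning code and event handlers at the end of this class read them back.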
13
14def generate_sstatefn(spec, hash, taskname, siginfo, d):
15 if taskname is None:
16 return ""
17 extension = ".tar.zst"
18 # 8 chars reserved for siginfo
19 limit = 254 - 8
20 if siginfo:
21 limit = 254
22 extension = ".tar.zst.siginfo"
23 if not hash:
24 hash = "INVALID"
25 fn = spec + hash + "_" + taskname + extension
26 # If the filename is too long, attempt to reduce it
27 if len(fn) > limit:
28 components = spec.split(":")
29 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
30 # 7 is for the separators
31 avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
32 components[2] = components[2][:avail]
33 components[3] = components[3][:avail]
34 components[4] = components[4][:avail]
35 spec = ":".join(components)
36 fn = spec + hash + "_" + taskname + extension
37 if len(fn) > limit:
38 bb.fatal("Unable to reduce sstate name to less than 255 characters")
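    # Shard the objects into <hash[0:2]>/<hash[2:4]>/ subdirectories so that no
    # single directory in SSTATE_DIR has to hold every object.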
39 return hash[:2] + "/" + hash[2:4] + "/" + fn
40
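# SSTATE_PKGSPEC is the colon-separated spec consumed by generate_sstatefn()
# above: field 0 is "sstate", 1 is PN, fields 2-4 carry the target triplet, PV
# and PR (informational, and truncated first when names get too long), 5 is
# SSTATE_PKGARCH and 6 is SSTATE_VERSION.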
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
49
50# explicitly make PV depend on the evaluated value of the PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
54# of the system, we let the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
58# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
59SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
60# Avoid docbook/sgml catalog warnings for now
61SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
62# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
63SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
64SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
65# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
66SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
67# Archive the sources for many architectures in one deploy folder
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
69# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
72SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
73SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
74
75SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
76SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
77SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
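# Each entry below is <sstate task>:<file glob>:<path, or "regex-" prefixed
# regular expression>; the hash equivalence code (oe.sstatesig) uses these to
# filter build-specific paths out of matching files before computing a task's
# output hash.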
78SSTATE_HASHEQUIV_FILEMAP ?= " \
79 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
80 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
81 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
82 populate_sysroot:*/crossscripts/*:${TMPDIR} \
83 populate_sysroot:*/crossscripts/*:${COREBASE} \
84 "
85
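# BB_HASHFILENAME carries the data sstate_checkhashes() needs to reconstruct
# sstate filenames: whether NATIVELSBSTRING forms part of the path (True/False),
# the package spec and the "software" spec used by tasks such as do_populate_lic.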
86BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
87
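# SSTATE_ARCHS lists every sstate package architecture this build may produce;
# the event handlers at the end of this class iterate it to locate the
# per-architecture manifest index files.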
88SSTATE_ARCHS = " \
89 ${BUILD_ARCH} \
90 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
91 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_OS} \
93 ${SDK_ARCH}_${PACKAGE_ARCH} \
94 allarch \
95 ${PACKAGE_ARCH} \
96 ${PACKAGE_EXTRA_ARCHS} \
97 ${MACHINE_ARCH}"
98SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
99
100SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
101
102SSTATECREATEFUNCS += "sstate_hardcode_path"
103SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
104SSTATEPOSTCREATEFUNCS = ""
105SSTATEPREINSTFUNCS = ""
106SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
107SSTATEPOSTINSTFUNCS = ""
108EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
109
110# Check whether sstate exists for tasks that support sstate and are in the
111# locked signatures file.
112SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
113
114# Check whether the task's computed hash matches the task's hash in the
115# locked signatures file.
116SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
117
118# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
119# not sign)
120SSTATE_SIG_KEY ?= ""
121SSTATE_SIG_PASSPHRASE ?= ""
122# Whether to verify the GnuPG signatures when extracting sstate archives
123SSTATE_VERIFY_SIG ?= "0"
124# List of signatures to consider valid.
125SSTATE_VALID_SIGS ??= ""
126SSTATE_VALID_SIGS[vardepvalue] = ""
127
128SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
129SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
130 the output hash for a task, which in turn is used to determine equivalency. \
131 "
132
133SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
134SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
135 hash equivalency server, such as PN, PV, taskname, etc. This information \
136 is very useful for developers looking at task data, but may leak sensitive \
137 data if the equivalence server is public. \
138 "
139
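# The anonymous Python below picks SSTATE_PKGARCH (and the NATIVELSBSTRING
# extra path component) according to the class the recipe inherits, then wires
# the sstate pre/post functions and network flags onto every task in SSTATETASKS.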
140python () {
141 if bb.data.inherits_class('native', d):
142 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
143 elif bb.data.inherits_class('crosssdk', d):
144 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
145 elif bb.data.inherits_class('cross', d):
146 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
147 elif bb.data.inherits_class('nativesdk', d):
148 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
149 elif bb.data.inherits_class('cross-canadian', d):
150 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
151 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
152 d.setVar('SSTATE_PKGARCH', "allarch")
153 else:
154 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
155
156 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
157 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
158 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
159 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
160
161 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
162 d.setVar('SSTATETASKS', " ".join(unique_tasks))
163 for task in unique_tasks:
164 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
165 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
166 d.setVarFlag(task, 'network', '1')
167 d.setVarFlag(task + "_setscene", 'network', '1')
168}
169
170def sstate_init(task, d):
171 ss = {}
172 ss['task'] = task
173 ss['dirs'] = []
174 ss['plaindirs'] = []
175 ss['lockfiles'] = []
176 ss['lockfiles-shared'] = []
177 return ss
178
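# Build the shared state dictionary for a task from its sstate-inputdirs,
# sstate-outputdirs, sstate-plaindirs, lockfile and interceptfuncs varflags.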
179def sstate_state_fromvars(d, task = None):
180 if task is None:
181 task = d.getVar('BB_CURRENTTASK')
182 if not task:
183 bb.fatal("sstate code running without task context?!")
184 task = task.replace("_setscene", "")
185
186 if task.startswith("do_"):
187 task = task[3:]
188 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
189 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
190 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
191 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
192 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
193 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
194 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
195 if not task or len(inputs) != len(outputs):
196 bb.fatal("sstate variables not setup correctly?!")
197
198 if task == "populate_lic":
199 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
200 d.setVar("SSTATE_EXTRAPATH", "")
201 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
202
203 ss = sstate_init(task, d)
204 for i in range(len(inputs)):
205 sstate_add(ss, inputs[i], outputs[i], d)
206 ss['lockfiles'] = lockfiles
207 ss['lockfiles-shared'] = lockfilesshared
208 ss['plaindirs'] = plaindirs
209 ss['interceptfuncs'] = interceptfuncs
210 ss['fixmedir'] = fixmedir
211 return ss
212
213def sstate_add(ss, source, dest, d):
214 if not source.endswith("/"):
215 source = source + "/"
216 if not dest.endswith("/"):
217 dest = dest + "/"
218 source = os.path.normpath(source)
219 dest = os.path.normpath(dest)
220 srcbase = os.path.basename(source)
221 ss['dirs'].append([srcbase, source, dest])
222 return ss
223
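# Stage the task's output from the sstate input directories into the shared
# areas, writing a manifest of everything installed and erroring if a file
# would overwrite one already provided by another task (unless the path is
# covered by SSTATE_ALLOW_OVERLAP_FILES).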
224def sstate_install(ss, d):
225 import oe.path
226 import oe.sstatesig
227 import subprocess
228
229 sharedfiles = []
230 shareddirs = []
231 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
232
233 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
234
235 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
236
237 if os.access(manifest, os.R_OK):
238 bb.fatal("Package already staged (%s)?!" % manifest)
239
240 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
241
242 locks = []
243 for lock in ss['lockfiles-shared']:
244 locks.append(bb.utils.lockfile(lock, True))
245 for lock in ss['lockfiles']:
246 locks.append(bb.utils.lockfile(lock))
247
248 for state in ss['dirs']:
249 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
250 for walkroot, dirs, files in os.walk(state[1]):
251 for file in files:
252 srcpath = os.path.join(walkroot, file)
253 dstpath = srcpath.replace(state[1], state[2])
254 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
255 sharedfiles.append(dstpath)
256 for dir in dirs:
257 srcdir = os.path.join(walkroot, dir)
258 dstdir = srcdir.replace(state[1], state[2])
259 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
260 if os.path.islink(srcdir):
261 sharedfiles.append(dstdir)
262 continue
263 if not dstdir.endswith("/"):
264 dstdir = dstdir + "/"
265 shareddirs.append(dstdir)
266
267 # Check the file list for conflicts against files which already exist
268 overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
269 match = []
270 for f in sharedfiles:
271 if os.path.exists(f) and not os.path.islink(f):
272 f = os.path.normpath(f)
273 realmatch = True
274 for w in overlap_allowed:
275 w = os.path.normpath(w)
276 if f.startswith(w):
277 realmatch = False
278 break
279 if realmatch:
280 match.append(f)
281 sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
282 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
283 if search_output:
284 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
285 else:
286 match.append(" (not matched to any task)")
287 if match:
288 bb.error("The recipe %s is trying to install files into a shared " \
289 "area when those files already exist. Those files and their manifest " \
290 "location are:\n %s\nPlease verify which recipe should provide the " \
291 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
292 "break things - if not now, possibly in the future (we've seen builds fail " \
293 "several months later). If the system knew how to recover from this " \
294 "automatically it would, however there are several different scenarios " \
295 "which can result in this and we don't know which one this is. It may be " \
296 "you have switched providers of something like virtual/kernel (e.g. from " \
297 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
298 "clean task for both recipes and it will resolve this error. It may be " \
299 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
300 "those recipes should again resolve this error, however switching " \
301 "DISTRO_FEATURES on an existing build directory is not supported - you " \
302 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
303 "It could be the overlapping files detected are harmless in which case " \
304 "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
305 "also be your build is including two different conflicting versions of " \
306 "things (e.g. bluez 4 and bluez 5 and the correct solution for that would " \
307 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
308 "sharing the error and filelist above." % \
309 (d.getVar('PN'), "\n ".join(match)))
310 bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
311
312 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
313 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
314 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
315
316 # Write out the manifest
317 f = open(manifest, "w")
318 for file in sharedfiles:
319 f.write(file + "\n")
320
321 # We want to ensure that directories appear at the end of the manifest
322 # so that when we test to see if they should be deleted any contents
323 # added by the task will have been removed first.
324 dirs = sorted(shareddirs, key=len)
325 # Must remove children first, which will have a longer path than the parent
326 for di in reversed(dirs):
327 f.write(di + "\n")
328 f.close()
329
330 # Append to the list of manifests for this PACKAGE_ARCH
331
332 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
333 l = bb.utils.lockfile(i + ".lock")
334 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
335 manifests = []
336 if os.path.exists(i):
337 with open(i, "r") as f:
338 manifests = f.readlines()
339 # We append new entries, we don't remove older entries which may have the same
340 # manifest name but different versions from stamp/workdir. See below.
341 if filedata not in manifests:
342 with open(i, "a+") as f:
343 f.write(filedata)
344 bb.utils.unlockfile(l)
345
346 # Run the actual file install
347 for state in ss['dirs']:
348 if os.path.exists(state[1]):
349 oe.path.copyhardlinktree(state[1], state[2])
350
351 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
352 # All hooks should run in the SSTATE_INSTDIR
353 bb.build.exec_func(postinst, d, (sstateinst,))
354
355 for lock in locks:
356 bb.utils.unlockfile(lock)
357
358sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
359sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
360
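# Fetch (from a mirror if configured), optionally verify and then unpack the
# sstate archive for a task, returning False if no usable archive was found so
# that the caller falls back to running the real task.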
361def sstate_installpkg(ss, d):
362 from oe.gpg_sign import get_signer
363
364 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
365 d.setVar("SSTATE_CURRTASK", ss['task'])
366 sstatefetch = d.getVar('SSTATE_PKGNAME')
367 sstatepkg = d.getVar('SSTATE_PKG')
368 verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)
369
370 if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
371 pstaging_fetch(sstatefetch, d)
372
373 if not os.path.isfile(sstatepkg):
374 bb.note("Sstate package %s does not exist" % sstatepkg)
375 return False
376
377 sstate_clean(ss, d)
378
379 d.setVar('SSTATE_INSTDIR', sstateinst)
380
381 if verify_sig:
382 if not os.path.isfile(sstatepkg + '.sig'):
383 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
384 return False
385 signer = get_signer(d, 'local')
386 if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
387 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
388 return False
389
390 # Empty the sstateinst directory, ensure it's clean
391 if os.path.exists(sstateinst):
392 oe.path.remove(sstateinst)
393 bb.utils.mkdirhier(sstateinst)
394
395 sstateinst = d.getVar("SSTATE_INSTDIR")
396 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
397
398 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
399 # All hooks should run in the SSTATE_INSTDIR
400 bb.build.exec_func(f, d, (sstateinst,))
401
402 return sstate_installpkgdir(ss, d)
403
404def sstate_installpkgdir(ss, d):
405 import oe.path
406 import subprocess
407
408 sstateinst = d.getVar("SSTATE_INSTDIR")
409 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
410
411 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
412 # All hooks should run in the SSTATE_INSTDIR
413 bb.build.exec_func(f, d, (sstateinst,))
414
415 def prepdir(dir):
416 # remove dir if it exists, ensure any parent directories do exist
417 if os.path.exists(dir):
418 oe.path.remove(dir)
419 bb.utils.mkdirhier(dir)
420 oe.path.remove(dir)
421
422 for state in ss['dirs']:
423 prepdir(state[1])
424 bb.utils.rename(sstateinst + state[0], state[1])
425 sstate_install(ss, d)
426
427 for plain in ss['plaindirs']:
428 workdir = d.getVar('WORKDIR')
429 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
430 src = sstateinst + "/" + plain.replace(workdir, '')
431 if sharedworkdir in plain:
432 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
433 dest = plain
434 bb.utils.mkdirhier(src)
435 prepdir(dest)
436 bb.utils.rename(src, dest)
437
438 return True
439
440python sstate_hardcode_path_unpack () {
441 # Fixup hardcoded paths
442 #
443 # Note: The logic below must match the reverse logic in
444 # sstate_hardcode_path(d)
445 import subprocess
446
447 sstateinst = d.getVar('SSTATE_INSTDIR')
448 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
449 fixmefn = sstateinst + "fixmepath"
450 if os.path.isfile(fixmefn):
451 staging_target = d.getVar('RECIPE_SYSROOT')
452 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
453
454 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
455 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
456 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
457 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
458 else:
459 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
460
461 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
462 for fixmevar in extra_staging_fixmes.split():
463 fixme_path = d.getVar(fixmevar)
464 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
465
466 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
467 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
468
469 # Defer do_populate_sysroot relocation command
470 if sstatefixmedir:
471 bb.utils.mkdirhier(sstatefixmedir)
472 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
473 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
474 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
475 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
476 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
477 f.write(sstate_hardcode_cmd)
478 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
479 return
480
481 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
482 subprocess.check_call(sstate_hardcode_cmd, shell=True)
483
484 # Need to remove this or we'd copy it into the target directory and may
485 # conflict with another writer
486 os.remove(fixmefn)
487}
488
489def sstate_clean_cachefile(ss, d):
490 import oe.path
491
492 if d.getVarFlag('do_%s' % ss['task'], 'task'):
493 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
494 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
495 bb.note("Removing %s" % sstatepkgfile)
496 oe.path.remove(sstatepkgfile)
497
498def sstate_clean_cachefiles(d):
499 for task in (d.getVar('SSTATETASKS') or "").split():
500 ld = d.createCopy()
501 ss = sstate_state_fromvars(ld, task)
502 sstate_clean_cachefile(ss, ld)
503
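# Remove every path listed in an sstate manifest (optionally re-rooted via
# 'prefix'), run any associated .postrm script and finally delete the manifest
# itself.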
504def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
505 import oe.path
506
507 mfile = open(manifest)
508 entries = mfile.readlines()
509 mfile.close()
510
511 for entry in entries:
512 entry = entry.strip()
513 if prefix and not entry.startswith("/"):
514 entry = prefix + "/" + entry
515 bb.debug(2, "Removing manifest: %s" % entry)
516 # We can race against another package populating directories as we're removing them
517 # so we ignore errors here.
518 try:
519 if entry.endswith("/"):
520 if os.path.islink(entry[:-1]):
521 os.remove(entry[:-1])
522 elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
523 # Removing directories whilst builds are in progress exposes a race. Only
524 # do it in contexts where it is safe to do so.
525 os.rmdir(entry[:-1])
526 else:
527 os.remove(entry)
528 except OSError:
529 pass
530
531 postrm = manifest + ".postrm"
532 if os.path.exists(manifest + ".postrm"):
533 import subprocess
534 os.chmod(postrm, 0o755)
535 subprocess.check_call(postrm, shell=True)
536 oe.path.remove(postrm)
537
538 oe.path.remove(manifest)
539
540def sstate_clean(ss, d):
541 import oe.path
542 import glob
543
544 d2 = d.createCopy()
545 stamp_clean = d.getVar("STAMPCLEAN")
546 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
547 if extrainf:
548 d2.setVar("SSTATE_MANMACH", extrainf)
549 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
550 else:
551 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
552
553 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
554
555 if os.path.exists(manifest):
556 locks = []
557 for lock in ss['lockfiles-shared']:
558 locks.append(bb.utils.lockfile(lock))
559 for lock in ss['lockfiles']:
560 locks.append(bb.utils.lockfile(lock))
561
562 sstate_clean_manifest(manifest, d, canrace=True)
563
564 for lock in locks:
565 bb.utils.unlockfile(lock)
566
567 # Remove the current and previous stamps, but keep the sigdata.
568 #
569 # The glob() matches do_task* which may match multiple tasks, for
570 # example: do_package and do_package_write_ipk, so we need to
571 # exactly match *.do_task.* and *.do_task_setscene.*
572 rm_stamp = '.do_%s.' % ss['task']
573 rm_setscene = '.do_%s_setscene.' % ss['task']
574 # For BB_SIGNATURE_HANDLER = "noop"
575 rm_nohash = ".do_%s" % ss['task']
576 for stfile in glob.glob(wildcard_stfile):
577 # Keep the sigdata
578 if ".sigdata." in stfile or ".sigbasedata." in stfile:
579 continue
580 # Preserve taint files in the stamps directory
581 if stfile.endswith('.taint'):
582 continue
583 if rm_stamp in stfile or rm_setscene in stfile or \
584 stfile.endswith(rm_nohash):
585 oe.path.remove(stfile)
586
587sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
588
589CLEANFUNCS += "sstate_cleanall"
590
591python sstate_cleanall() {
592 bb.note("Removing shared state for package %s" % d.getVar('PN'))
593
594 manifest_dir = d.getVar('SSTATE_MANIFESTS')
595 if not os.path.exists(manifest_dir):
596 return
597
598 tasks = d.getVar('SSTATETASKS').split()
599 for name in tasks:
600 ld = d.createCopy()
601 shared_state = sstate_state_fromvars(ld, name)
602 sstate_clean(shared_state, ld)
603}
604
605python sstate_hardcode_path () {
606 import subprocess, platform
607
608 # Need to remove hardcoded paths and fix these when we install the
609 # staging packages.
610 #
611 # Note: the logic in this function needs to match the reverse logic
612 # in sstate_installpkg(ss, d)
613
614 staging_target = d.getVar('RECIPE_SYSROOT')
615 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
616 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
617
618 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
619 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
620 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
621 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
622 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
623 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
624 else:
625 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
626 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
627
628 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
629 for fixmevar in extra_staging_fixmes.split():
630 fixme_path = d.getVar(fixmevar)
631 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
632 sstate_grep_cmd += " -e '%s'" % (fixme_path)
633
634 fixmefn = sstate_builddir + "fixmepath"
635
636 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
637 sstate_filelist_cmd = "tee %s" % (fixmefn)
638
639 # fixmepath file needs relative paths, drop sstate_builddir prefix
640 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
641
642 xargs_no_empty_run_cmd = '--no-run-if-empty'
643 if platform.system() == 'Darwin':
644 xargs_no_empty_run_cmd = ''
645
646 # Limit the fixpaths and sed operations based on the initial grep search
647 # This has the side effect of making sure the vfs cache is hot
648 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
649
650 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
651 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
652
653 # If the fixmefn is empty, remove it..
654 if os.stat(fixmefn).st_size == 0:
655 os.remove(fixmefn)
656 else:
657 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
658 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
659}
660
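# Move the task's output directories into SSTATE_BUILDDIR, normalising
# timestamps for do_package against SOURCE_DATE_EPOCH and rejecting absolute
# symlinks into TMPDIR, then run the create/sign functions to produce the
# sstate archive.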
661def sstate_package(ss, d):
662 import oe.path
663 import time
664
665 tmpdir = d.getVar('TMPDIR')
666
667 fixtime = False
668 if ss['task'] == "package":
669 fixtime = True
670
671 def fixtimestamp(root, path):
672 f = os.path.join(root, path)
673 if os.lstat(f).st_mtime > sde:
674 os.utime(f, (sde, sde), follow_symlinks=False)
675
676 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
677 sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
678 d.setVar("SSTATE_CURRTASK", ss['task'])
679 bb.utils.remove(sstatebuild, recurse=True)
680 bb.utils.mkdirhier(sstatebuild)
681 for state in ss['dirs']:
682 if not os.path.exists(state[1]):
683 continue
684 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
685 # Find and error on absolute symlinks. We could attempt to relocate but it's not
686 # clear where the symlink is relative to in this context. We could add that markup
687 # to sstate tasks but there aren't many of these so better just avoid them entirely.
688 for walkroot, dirs, files in os.walk(state[1]):
689 for file in files + dirs:
690 if fixtime:
691 fixtimestamp(walkroot, file)
692 srcpath = os.path.join(walkroot, file)
693 if not os.path.islink(srcpath):
694 continue
695 link = os.readlink(srcpath)
696 if not os.path.isabs(link):
697 continue
698 if not link.startswith(tmpdir):
699 continue
700 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
701 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
702 bb.utils.rename(state[1], sstatebuild + state[0])
703
704 workdir = d.getVar('WORKDIR')
705 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
706 for plain in ss['plaindirs']:
707 pdir = plain.replace(workdir, sstatebuild)
708 if sharedworkdir in plain:
709 pdir = plain.replace(sharedworkdir, sstatebuild)
710 bb.utils.mkdirhier(plain)
711 bb.utils.mkdirhier(pdir)
712 bb.utils.rename(plain, pdir)
713 if fixtime:
714 fixtimestamp(pdir, "")
715 for walkroot, dirs, files in os.walk(pdir):
716 for file in files + dirs:
717 fixtimestamp(walkroot, file)
718
719 d.setVar('SSTATE_BUILDDIR', sstatebuild)
720 d.setVar('SSTATE_INSTDIR', sstatebuild)
721
722 if d.getVar('SSTATE_SKIP_CREATION') == '1':
723 return
724
725 sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
726 if d.getVar('SSTATE_SIG_KEY'):
727 sstate_create_package.append('sstate_sign_package')
728
729 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
730 sstate_create_package + \
731 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
732 # All hooks should run in SSTATE_BUILDDIR.
733 bb.build.exec_func(f, d, (sstatebuild,))
734
735 # SSTATE_PKG may have been changed by sstate_report_unihash
736 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
737 if not os.path.exists(siginfo):
738 bb.siggen.dump_this_task(siginfo, d)
739 else:
740 try:
741 os.utime(siginfo, None)
742 except PermissionError:
743 pass
744 except OSError as e:
745 # Handle read-only file systems gracefully
746 import errno
747 if e.errno != errno.EROFS:
748 raise e
749
750 return
751
752sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
753
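# Try to fetch the named sstate object (plus its .siginfo and, if signature
# verification is enabled, its .sig) from SSTATE_MIRRORS into SSTATE_DIR.
# Fetch failures are silently ignored and the task is simply rebuilt.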
754def pstaging_fetch(sstatefetch, d):
755 import bb.fetch2
756
757 # Only try and fetch if the user has configured a mirror
758 mirrors = d.getVar('SSTATE_MIRRORS')
759 if not mirrors:
760 return
761
762 # Copy the data object and override DL_DIR and SRC_URI
763 localdata = bb.data.createCopy(d)
764
765 dldir = localdata.expand("${SSTATE_DIR}")
766 bb.utils.mkdirhier(dldir)
767
768 localdata.delVar('MIRRORS')
769 localdata.setVar('FILESPATH', dldir)
770 localdata.setVar('DL_DIR', dldir)
771 localdata.setVar('PREMIRRORS', mirrors)
772
773 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
774 # we'll want to allow network access for the current set of fetches.
775 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
776 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
777 localdata.delVar('BB_NO_NETWORK')
778
779 # Try a fetch from the sstate mirror, if it fails just return and
780 # we will build the package
781 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
782 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
783 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
784 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
785
786 for srcuri in uris:
787 localdata.delVar('SRC_URI')
788 localdata.setVar('SRC_URI', srcuri)
789 try:
790 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
791 fetcher.checkstatus()
792 fetcher.download()
793
794 except bb.fetch2.BBFetchException:
795 pass
796
797def sstate_setscene(d):
798 shared_state = sstate_state_fromvars(d)
799 accelerate = sstate_installpkg(shared_state, d)
800 if not accelerate:
801 msg = "No sstate archive obtainable, will run full task instead."
802 bb.warn(msg)
803 raise bb.BBHandledException(msg)
804
805python sstate_task_prefunc () {
806 shared_state = sstate_state_fromvars(d)
807 sstate_clean(shared_state, d)
808}
809sstate_task_prefunc[dirs] = "${WORKDIR}"
810
811python sstate_task_postfunc () {
812 shared_state = sstate_state_fromvars(d)
813
814 for intercept in shared_state['interceptfuncs']:
815 bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
816
817 omask = os.umask(0o002)
818 if omask != 0o002:
819 bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
820 sstate_package(shared_state, d)
821 os.umask(omask)
822
823 sstateinst = d.getVar("SSTATE_INSTDIR")
824 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
825
826 sstate_installpkgdir(shared_state, d)
827
828 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
829}
830sstate_task_postfunc[dirs] = "${WORKDIR}"
831
832
833#
834# Shell function to generate a sstate package from a directory
835# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
836#
837sstate_create_package () {
838 # Exit early if it already exists
839 if [ -e ${SSTATE_PKG} ]; then
840 touch ${SSTATE_PKG} 2>/dev/null || true
841 return
842 fi
843
844 mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
845 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
846
847 OPT="-cS"
848 ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
849 # Use pzstd if available
850 if [ -x "$(command -v pzstd)" ]; then
851 ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
852 fi
853
854 # Need to handle empty directories
855 if [ "$(ls -A)" ]; then
856 set +e
857 tar -I "$ZSTD" $OPT -f $TFILE *
858 ret=$?
859 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
860 exit 1
861 fi
862 set -e
863 else
864 tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
865 fi
866 chmod 0664 $TFILE
867 # Skip if it was already created by some other process
868 if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
869 # There is a symbolic link, but it links to nothing.
870 # Forcefully replace it with the new file.
871 ln -f $TFILE ${SSTATE_PKG} || true
872 elif [ ! -e ${SSTATE_PKG} ]; then
873 # Move into place using ln to attempt an atomic op.
874 # Abort if it already exists
875 ln $TFILE ${SSTATE_PKG} || true
876 else
877 touch ${SSTATE_PKG} 2>/dev/null || true
878 fi
879 rm $TFILE
880}
881
882python sstate_sign_package () {
883 from oe.gpg_sign import get_signer
884
885
886 signer = get_signer(d, 'local')
887 sstate_pkg = d.getVar('SSTATE_PKG')
888 if os.path.exists(sstate_pkg + '.sig'):
889 os.unlink(sstate_pkg + '.sig')
890 signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
891 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
892}
893
894python sstate_report_unihash() {
895 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
896
897 if report_unihash:
898 ss = sstate_state_fromvars(d)
899 report_unihash(os.getcwd(), ss['task'], d)
900}
901
902#
903# Shell function to decompress and prepare a package for installation
904# Will be run from within SSTATE_INSTDIR.
905#
906sstate_unpack_package () {
907 ZSTD="zstd -T${ZSTD_THREADS}"
908 # Use pzstd if available
909 if [ -x "$(command -v pzstd)" ]; then
910 ZSTD="pzstd -p ${ZSTD_THREADS}"
911 fi
912
913 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
914 # update .siginfo atime on local/NFS mirror if it is a symbolic link
915 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
916 # update each symbolic link instead of any referenced file
917 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
918 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
919 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
920}
921
922BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
923
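# BB_HASHCHECK_FUNCTION entry point: given the candidate setscene tasks in
# sq_data, return the set of task ids for which an sstate object exists either
# locally in SSTATE_DIR or on one of the configured SSTATE_MIRRORS.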
924def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
925 import itertools
926
927 found = set()
928 missed = set()
929
930 def gethash(task):
931 return sq_data['unihash'][task]
932
933 def getpathcomponents(task, d):
934 # Magic data from BB_HASHFILENAME
935 splithashfn = sq_data['hashfn'][task].split(" ")
936 spec = splithashfn[1]
937 if splithashfn[0] == "True":
938 extrapath = d.getVar("NATIVELSBSTRING") + "/"
939 else:
940 extrapath = ""
941
942 tname = bb.runqueue.taskname_from_tid(task)[3:]
943
944 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
945 spec = splithashfn[2]
946 extrapath = ""
947
948 return spec, extrapath, tname
949
950 def getsstatefile(tid, siginfo, d):
951 spec, extrapath, tname = getpathcomponents(tid, d)
952 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
953
954 for tid in sq_data['hash']:
955
956 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
957
958 if os.path.exists(sstatefile):
959 found.add(tid)
960 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
961 else:
962 missed.add(tid)
963 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
964
965 foundLocal = len(found)
966 mirrors = d.getVar("SSTATE_MIRRORS")
967 if mirrors:
968 # Copy the data object and override DL_DIR and SRC_URI
969 localdata = bb.data.createCopy(d)
970
971 dldir = localdata.expand("${SSTATE_DIR}")
972 localdata.delVar('MIRRORS')
973 localdata.setVar('FILESPATH', dldir)
974 localdata.setVar('DL_DIR', dldir)
975 localdata.setVar('PREMIRRORS', mirrors)
976
977 bb.debug(2, "SState using premirror of: %s" % mirrors)
978
979 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
980 # we'll want to allow network access for the current set of fetches.
981 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
982 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
983 localdata.delVar('BB_NO_NETWORK')
984
985 from bb.fetch2 import FetchConnectionCache
986 def checkstatus_init():
987 while not connection_cache_pool.full():
988 connection_cache_pool.put(FetchConnectionCache())
989
990 def checkstatus_end():
991 while not connection_cache_pool.empty():
992 connection_cache = connection_cache_pool.get()
993 connection_cache.close_connections()
994
995 def checkstatus(arg):
996 (tid, sstatefile) = arg
997
998 connection_cache = connection_cache_pool.get()
999 localdata2 = bb.data.createCopy(localdata)
1000 srcuri = "file://" + sstatefile
1001 localdata2.setVar('SRC_URI', srcuri)
1002 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
1003
1004 import traceback
1005
1006 try:
1007 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
1008 connection_cache=connection_cache)
1009 fetcher.checkstatus()
1010 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1011 found.add(tid)
1012 missed.remove(tid)
1013 except bb.fetch2.FetchError as e:
1014 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1015 except Exception as e:
1016 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1017
1018 connection_cache_pool.put(connection_cache)
1019
1020 if progress:
1021 bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
1022 bb.event.check_for_interrupts(d)
1023
1024 tasklist = []
1025 for tid in missed:
1026 sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1027 tasklist.append((tid, sstatefile))
1028
1029 if tasklist:
1030 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1031
1032 ## thread-safe counter
1033 cnt_tasks_done = itertools.count(start = 1)
1034 progress = len(tasklist) >= 100
1035 if progress:
1036 msg = "Checking sstate mirror object availability"
1037 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1038
1039 # Have to setup the fetcher environment here rather than in each thread as it would race
1040 fetcherenv = bb.fetch2.get_fetcher_environment(d)
1041 with bb.utils.environment(**fetcherenv):
1042 bb.event.enable_threadlock()
1043 import concurrent.futures
1044 from queue import Queue
1045 connection_cache_pool = Queue(nproc)
1046 checkstatus_init()
1047 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
1048 executor.map(checkstatus, tasklist.copy())
1049 checkstatus_end()
1050 bb.event.disable_threadlock()
1051
1052 if progress:
1053 bb.event.fire(bb.event.ProcessFinished(msg), d)
1054
1055 inheritlist = d.getVar("INHERIT")
1056 if "toaster" in inheritlist:
1057 evdata = {'missed': [], 'found': []};
1058 for tid in missed:
1059 sstatefile = d.expand(getsstatefile(tid, False, d))
1060 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1061 for tid in found:
1062 sstatefile = d.expand(getsstatefile(tid, False, d))
1063 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1064 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1065
1066 if summary:
1067 # Print some summary statistics about the current task completion and how much sstate
1068 # reuse there was. Avoid divide by zero errors.
1069 total = len(sq_data['hash'])
1070 complete = 0
1071 if currentcount:
1072 complete = (len(found) + currentcount) / (total + currentcount) * 100
1073 match = 0
1074 if total:
1075 match = len(found) / total * 100
1076 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1077 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1078
1079 if hasattr(bb.parse.siggen, "checkhashes"):
1080 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1081
1082 return found
1083setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
1084
1085BB_SETSCENE_DEPVALID = "setscene_depvalid"
1086
1087def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1088 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1089 # task is included in taskdependees too
1090 # Return - False - We need this dependency
1091 # - True - We can skip this dependency
1092 import re
1093
1094 def logit(msg, log):
1095 if log is not None:
1096 log.append(msg)
1097 else:
1098 bb.debug(2, msg)
1099
1100 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1101
1102 directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]
1103
1104 def isNativeCross(x):
1105 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1106
1107 # We only need to trigger deploy_source_date_epoch through direct dependencies
1108 if taskdependees[task][1] in directtasks:
1109 return True
1110
1111 # We only need to trigger packagedata through direct dependencies
1112 # but need to preserve packagedata on packagedata links
1113 if taskdependees[task][1] == "do_packagedata":
1114 for dep in taskdependees:
1115 if taskdependees[dep][1] == "do_packagedata":
1116 return False
1117 return True
1118
1119 for dep in taskdependees:
1120 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1121 if task == dep:
1122 continue
1123 if dep in notneeded:
1124 continue
1125 # do_package_write_* and do_package don't need do_package
1126 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1127 continue
1128 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1129 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1130 return False
1131 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1132 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1133 continue
1134 # Native/Cross packages don't exist and are noexec anyway
1135 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1136 continue
1137
1138 # This is due to the [depends] in useradd.bbclass complicating matters
1139 # The logic *is* reversed here due to the way hard setscene dependencies are injected
1140 if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1141 continue
1142
1143 # Consider sysroot depending on sysroot tasks
1144 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1145 # Allow excluding certain recursive dependencies. If a recipe needs it should add a
1146 # specific dependency itself, rather than relying on one of its dependees to pull
1147 # them in.
1148 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
1149 not_needed = False
1150 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1151 if excludedeps is None:
1152 # Cache the regular expressions for speed
1153 excludedeps = []
1154 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1155 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1156 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1157 for excl in excludedeps:
1158 if excl[0].match(taskdependees[dep][0]):
1159 if excl[1].match(taskdependees[task][0]):
1160 not_needed = True
1161 break
1162 if not_needed:
1163 continue
1164 # For meta-extsdk-toolchain we want all sysroot dependencies
1165 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1166 return False
1167 # Native/Cross populate_sysroot need their dependencies
1168 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1169 return False
1170 # Target populate_sysroot depended on by cross tools need to be installed
1171 if isNativeCross(taskdependees[dep][0]):
1172 return False
1173 # Native/cross tools depended upon by target sysroot are not needed
1174 # Add an exception for shadow-native as required by useradd.bbclass
1175 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1176 continue
1177 # Target populate_sysroot need their dependencies
1178 return False
1179
1180 if taskdependees[dep][1] in directtasks:
1181 continue
1182
1183 # Safe fallthrough default
1184 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1185 return False
1186 return True
1187
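# After every successful task which did not itself package sstate, dump (or
# refresh the timestamp of) the corresponding .siginfo file so signature data
# remains available for it.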
1188addhandler sstate_eventhandler
1189sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1190python sstate_eventhandler() {
1191 d = e.data
1192 writtensstate = d.getVar('SSTATE_CURRTASK')
1193 if not writtensstate:
1194 taskname = d.getVar("BB_RUNTASK")[3:]
1195 spec = d.getVar('SSTATE_PKGSPEC')
1196 swspec = d.getVar('SSTATE_SWSPEC')
1197 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1198 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1199 d.setVar("SSTATE_EXTRAPATH", "")
1200 d.setVar("SSTATE_CURRTASK", taskname)
1201 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1202 if not os.path.exists(siginfo):
1203 bb.siggen.dump_this_task(siginfo, d)
1204 else:
1205 try:
1206 os.utime(siginfo, None)
1207 except PermissionError:
1208 pass
1209 except OSError as e:
1210 # Handle read-only file systems gracefully
1211 import errno
1212 if e.errno != errno.EROFS:
1213 raise e
1214
1215}
1216
1217SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
1218
1219#
1220# Event handler which removes manifests and stamp files for recipes which are no
1221# longer 'reachable' in a build where they once were. 'Reachable' refers to
1222# whether a recipe is parsed so recipes in a layer which was removed would no
1223# longer be reachable. Switching between systemd and sysvinit where recipes
1224# became skipped would be another example.
1225#
1226# Also optionally removes the workdir of those tasks/recipes
1227#
1228addhandler sstate_eventhandler_reachablestamps
1229sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1230python sstate_eventhandler_reachablestamps() {
1231 import glob
1232 d = e.data
1233 stamps = e.stamps.values()
1234 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1235 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1236 preservestamps = []
1237 if os.path.exists(preservestampfile):
1238 with open(preservestampfile, 'r') as f:
1239 preservestamps = f.readlines()
1240 seen = []
1241
1242 # The machine index contains all the stamps this machine has ever seen in this build directory.
1243 # We should only remove things which this machine once accessed but no longer does.
1244 machineindex = set()
1245 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1246 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1247 if os.path.exists(mi):
1248 with open(mi, "r") as f:
1249 machineindex = set(line.strip() for line in f.readlines())
1250
1251 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1252 toremove = []
1253 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1254 if not os.path.exists(i):
1255 continue
1256 manseen = set()
1257 ignore = []
1258 with open(i, "r") as f:
1259 lines = f.readlines()
1260 for l in reversed(lines):
1261 try:
1262 (stamp, manifest, workdir) = l.split()
1263 # The index may have multiple entries for the same manifest as the code above only appends
1264 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1265 # The last entry in the list is the valid one, any earlier entries with matching manifests
1266 # should be ignored.
1267 if manifest in manseen:
1268 ignore.append(l)
1269 continue
1270 manseen.add(manifest)
1271 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1272 toremove.append(l)
1273 if stamp not in seen:
1274 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1275 seen.append(stamp)
1276 except ValueError:
1277 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1278
1279 if toremove:
1280 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1281 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1282
1283 removed = 0
1284 for r in toremove:
1285 (stamp, manifest, workdir) = r.split()
1286 for m in glob.glob(manifest + ".*"):
1287 if m.endswith(".postrm"):
1288 continue
1289 sstate_clean_manifest(m, d)
1290 bb.utils.remove(stamp + "*")
1291 if removeworkdir:
1292 bb.utils.remove(workdir, recurse = True)
1293 lines.remove(r)
1294 removed = removed + 1
1295 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1296 bb.event.check_for_interrupts(d)
1297
1298 bb.event.fire(bb.event.ProcessFinished(msg), d)
1299
1300 with open(i, "w") as f:
1301 for l in lines:
1302 if l in ignore:
1303 continue
1304 f.write(l)
1305 machineindex |= set(stamps)
1306 with open(mi, "w") as f:
1307 for l in machineindex:
1308 f.write(l + "\n")
1309
1310 if preservestamps:
1311 os.remove(preservestampfile)
1312}
1313
1314
1315#
1316# Bitbake can generate an event showing which setscene tasks are 'stale',
1317# i.e. which ones will be rerun. These are ones where a stamp file is present but
1318# it is stale (e.g. taskhash doesn't match). With that list we can go through
1319# the manifests for matching tasks and "uninstall" those manifests now. We do
1320# this now rather than mid build since the distribution of files between sstate
1321# objects may have changed, new tasks may run first and if those new tasks overlap
1322# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1323# removing these files is fast.
1324#
1325addhandler sstate_eventhandler_stalesstate
1326sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1327python sstate_eventhandler_stalesstate() {
1328 d = e.data
1329 tasks = e.tasks
1330
1331 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1332
1333 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1334 toremove = []
1335 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1336 if not os.path.exists(i):
1337 continue
1338 with open(i, "r") as f:
1339 lines = f.readlines()
1340 for l in lines:
1341 try:
1342 (stamp, manifest, workdir) = l.split()
1343 for tid in tasks:
1344 for s in tasks[tid]:
1345 if s.startswith(stamp):
1346 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1347 manname = manifest + "." + taskname
1348 if os.path.exists(manname):
1349 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1350 toremove.append((manname, tid, tasks[tid]))
1351 break
1352 except ValueError:
1353 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1354
1355 if toremove:
1356 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1357 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1358
1359 removed = 0
1360 for (manname, tid, stamps) in toremove:
1361 sstate_clean_manifest(manname, d)
1362 for stamp in stamps:
1363 bb.utils.remove(stamp)
1364 removed = removed + 1
1365 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1366 bb.event.check_for_interrupts(d)
1367
1368 bb.event.fire(bb.event.ProcessFinished(msg), d)
1369}