1#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
7SSTATE_VERSION = "10"
8
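# zstd compression level used when creating sstate archives (see sstate_create_package
# below). A site could raise it for smaller archives at the cost of CPU time, e.g. in
# local.conf (illustrative value): SSTATE_ZSTD_CLEVEL = "19"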
9SSTATE_ZSTD_CLEVEL ??= "8"
10
11SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
12SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
14def generate_sstatefn(spec, hash, taskname, siginfo, d):
15 if taskname is None:
16 return ""
17 extension = ".tar.zst"
18 # 8 chars reserved for siginfo
19 limit = 254 - 8
20 if siginfo:
21 limit = 254
22 extension = ".tar.zst.siginfo"
23 if not hash:
24 hash = "INVALID"
25 fn = spec + hash + "_" + taskname + extension
26 # If the filename is too long, attempt to reduce it
27 if len(fn) > limit:
28 components = spec.split(":")
29 # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
30 # 7 is for the separators
31 avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
32 components[2] = components[2][:avail]
33 components[3] = components[3][:avail]
34 components[4] = components[4][:avail]
35 spec = ":".join(components)
36 fn = spec + hash + "_" + taskname + extension
37 if len(fn) > limit:
38 bb.fatal("Unable to reduce sstate name to less than 255 characters")
39 return hash[:2] + "/" + hash[2:4] + "/" + fn
40
41SSTATE_PKGARCH = "${PACKAGE_ARCH}"
42SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
43SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
44SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
45SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
46SSTATE_EXTRAPATH = ""
47SSTATE_EXTRAPATHWILDCARD = ""
48SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"
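# Illustrative sketch of the resulting layout (recipe name, versions and hash below are
# hypothetical): generate_sstatefn() stores each object under a two-level hash prefix, so a
# do_populate_sysroot object for a recipe "zlib" 1.2.13-r0 on a core2-64 poky/linux build,
# with a unihash starting "abcd", would land at roughly:
#   ${SSTATE_DIR}/ab/cd/sstate:zlib:core2-64-poky-linux:1.2.13:r0:core2-64:10:abcd..._populate_sysroot.tar.zst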
49
50# explicitly make PV depend on the evaluated value of the PV variable
51PV[vardepvalue] = "${PV}"
52
53# We don't want the sstate to depend on things like the distro string
54# of the system; the sstate paths take care of this.
55SSTATE_EXTRAPATH[vardepvalue] = ""
56SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
58# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
59SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
60# Avoid docbook/sgml catalog warnings for now
61SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
62# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
63SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
64SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
65# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
66SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
67# Archive the sources for many architectures in one deploy folder
68SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
69# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
70SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
71SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
72SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
73SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"
74
75SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
76SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
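# With the default SSTATE_SCAN_FILES above, SSTATE_SCAN_CMD expands to roughly:
#   find ${SSTATE_BUILDDIR} \( -name "*.la" -o -name "*-config" -o -name "*_config" -o -name "postinst-*" \) -type f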
77SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
78SSTATE_HASHEQUIV_FILEMAP ?= " \
79 populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
80 populate_sysroot:*/postinst-useradd-*:${COREBASE} \
81 populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
82 populate_sysroot:*/crossscripts/*:${TMPDIR} \
83 populate_sysroot:*/crossscripts/*:${COREBASE} \
84 "
85
86BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"
87
88SSTATE_ARCHS = " \
89 ${BUILD_ARCH} \
90 ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
91 ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
92 ${SDK_ARCH}_${SDK_OS} \
93 ${SDK_ARCH}_${PACKAGE_ARCH} \
94 allarch \
95 ${PACKAGE_ARCH} \
96 ${PACKAGE_EXTRA_ARCHS} \
97 ${MACHINE_ARCH}"
98SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"
99
100SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
101
102SSTATECREATEFUNCS += "sstate_hardcode_path"
103SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
104SSTATEPOSTCREATEFUNCS = ""
105SSTATEPREINSTFUNCS = ""
106SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
107SSTATEPOSTINSTFUNCS = ""
108EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"
109
110# Check whether sstate exists for tasks that support sstate and are in the
111# locked signatures file.
112SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'
113
114# Check whether the task's computed hash matches the task's hash in the
115# locked signatures file.
116SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"
117
118# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
119# not sign)
120SSTATE_SIG_KEY ?= ""
121SSTATE_SIG_PASSPHRASE ?= ""
122# Whether to verify the GnuPG signatures when extracting sstate archives
123SSTATE_VERIFY_SIG ?= "0"
124# List of signatures to consider valid.
125SSTATE_VALID_SIGS ??= ""
126SSTATE_VALID_SIGS[vardepvalue] = ""
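# A minimal sketch of enabling signing and verification from a configuration file such as
# local.conf (the key ID and passphrase below are purely illustrative):
#   SSTATE_SIG_KEY = "0xA1B2C3D4"
#   SSTATE_SIG_PASSPHRASE = "example-passphrase"
#   SSTATE_VERIFY_SIG = "1"
#   SSTATE_VALID_SIGS = "0xA1B2C3D4"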
127
128SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
129SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
130 the output hash for a task, which in turn is used to determine equivalency. \
131 "
132
133SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
134SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
135 hash equivalency server, such as PN, PV, taskname, etc. This information \
136 is very useful for developers looking at task data, but may leak sensitive \
137 data if the equivalence server is public. \
138 "
139
140python () {
141 if bb.data.inherits_class('native', d):
142 d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
143 elif bb.data.inherits_class('crosssdk', d):
144 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
145 elif bb.data.inherits_class('cross', d):
146 d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
147 elif bb.data.inherits_class('nativesdk', d):
148 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
149 elif bb.data.inherits_class('cross-canadian', d):
150 d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
151 elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
152 d.setVar('SSTATE_PKGARCH', "allarch")
153 else:
154 d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))
155
156 if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
157 d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
158 d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
159 d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")
160
161 unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
162 d.setVar('SSTATETASKS', " ".join(unique_tasks))
163 for task in unique_tasks:
164 d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
165 d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
166 d.setVarFlag(task, 'network', '1')
167 d.setVarFlag(task + "_setscene", 'network', '1')
168}
169
170def sstate_init(task, d):
171 ss = {}
172 ss['task'] = task
173 ss['dirs'] = []
174 ss['plaindirs'] = []
175 ss['lockfiles'] = []
176 ss['lockfiles-shared'] = []
177 return ss
178
179def sstate_state_fromvars(d, task = None):
180 if task is None:
181 task = d.getVar('BB_CURRENTTASK')
182 if not task:
183 bb.fatal("sstate code running without task context?!")
184 task = task.replace("_setscene", "")
185
186 if task.startswith("do_"):
187 task = task[3:]
188 inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
189 outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
190 plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
191 lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
192 lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
193 interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
194 fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
195 if not task or len(inputs) != len(outputs):
196 bb.fatal("sstate variables not setup correctly?!")
197
198 if task == "populate_lic":
199 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
200 d.setVar("SSTATE_EXTRAPATH", "")
201 d.setVar('SSTATE_EXTRAPATHWILDCARD', "")
202
203 ss = sstate_init(task, d)
204 for i in range(len(inputs)):
205 sstate_add(ss, inputs[i], outputs[i], d)
206 ss['lockfiles'] = lockfiles
207 ss['lockfiles-shared'] = lockfilesshared
208 ss['plaindirs'] = plaindirs
209 ss['interceptfuncs'] = interceptfuncs
210 ss['fixmedir'] = fixmedir
211 return ss
212
213def sstate_add(ss, source, dest, d):
214 if not source.endswith("/"):
215 source = source + "/"
216 if not dest.endswith("/"):
217 dest = dest + "/"
218 source = os.path.normpath(source)
219 dest = os.path.normpath(dest)
220 srcbase = os.path.basename(source)
221 ss['dirs'].append([srcbase, source, dest])
222 return ss
223
224def sstate_install(ss, d):
225 import oe.path
226 import oe.sstatesig
227 import subprocess
228
229 sharedfiles = []
230 shareddirs = []
231 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
232
233 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
234
235 manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)
236
237 if os.access(manifest, os.R_OK):
238 bb.fatal("Package already staged (%s)?!" % manifest)
239
240 d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")
241
242 locks = []
243 for lock in ss['lockfiles-shared']:
244 locks.append(bb.utils.lockfile(lock, True))
245 for lock in ss['lockfiles']:
246 locks.append(bb.utils.lockfile(lock))
247
248 for state in ss['dirs']:
249 bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
250 for walkroot, dirs, files in os.walk(state[1]):
251 for file in files:
252 srcpath = os.path.join(walkroot, file)
253 dstpath = srcpath.replace(state[1], state[2])
254 #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
255 sharedfiles.append(dstpath)
256 for dir in dirs:
257 srcdir = os.path.join(walkroot, dir)
258 dstdir = srcdir.replace(state[1], state[2])
259 #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
260 if os.path.islink(srcdir):
261 sharedfiles.append(dstdir)
262 continue
263 if not dstdir.endswith("/"):
264 dstdir = dstdir + "/"
265 shareddirs.append(dstdir)
266
267 # Check the file list for conflicts against files which already exist
268 overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
269 match = []
270 for f in sharedfiles:
271 if os.path.exists(f) and not os.path.islink(f):
272 f = os.path.normpath(f)
273 realmatch = True
274 for w in overlap_allowed:
275 w = os.path.normpath(w)
276 if f.startswith(w):
277 realmatch = False
278 break
279 if realmatch:
280 match.append(f)
281 sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
282 search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
283 if search_output:
284 match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
285 else:
286 match.append(" (not matched to any task)")
287 if match:
288 bb.error("The recipe %s is trying to install files into a shared " \
289 "area when those files already exist. Those files and their manifest " \
290 "location are:\n %s\nPlease verify which recipe should provide the " \
291 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
292 "break things - if not now, possibly in the future (we've seen builds fail " \
293 "several months later). If the system knew how to recover from this " \
294 "automatically it would, however there are several different scenarios " \
295 "which can result in this and we don't know which one this is. It may be " \
296 "you have switched providers of something like virtual/kernel (e.g. from " \
297 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
298 "clean task for both recipes and it will resolve this error. It may be " \
299 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
300 "those recipes should again resolve this error, however switching " \
301 "DISTRO_FEATURES on an existing build directory is not supported - you " \
302 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
303 "It could be the overlapping files detected are harmless in which case " \
304 "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
305 "also be your build is including two different conflicting versions of " \
306 "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
307 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
308 "sharing the error and filelist above." % \
309 (d.getVar('PN'), "\n ".join(match)))
310 bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")
311
312 if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
313 sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
314 sharedfiles.append(ss['fixmedir'] + "/fixmepath")
315
316 # Write out the manifest
317 f = open(manifest, "w")
318 for file in sharedfiles:
319 f.write(file + "\n")
320
321 # We want to ensure that directories appear at the end of the manifest
322 # so that when we test to see if they should be deleted any contents
323 # added by the task will have been removed first.
324 dirs = sorted(shareddirs, key=len)
325 # Must remove children first, which will have a longer path than the parent
326 for di in reversed(dirs):
327 f.write(di + "\n")
328 f.close()
329
330 # Append to the list of manifests for this PACKAGE_ARCH
331
332 i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
333 l = bb.utils.lockfile(i + ".lock")
334 filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
335 manifests = []
336 if os.path.exists(i):
337 with open(i, "r") as f:
338 manifests = f.readlines()
339 # We append new entries, we don't remove older entries which may have the same
340 # manifest name but different versions from stamp/workdir. See below.
341 if filedata not in manifests:
342 with open(i, "a+") as f:
343 f.write(filedata)
344 bb.utils.unlockfile(l)
345
346 # Run the actual file install
347 for state in ss['dirs']:
348 if os.path.exists(state[1]):
349 oe.path.copyhardlinktree(state[1], state[2])
350
351 for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
352 # All hooks should run in the SSTATE_INSTDIR
353 bb.build.exec_func(postinst, d, (sstateinst,))
354
355 for lock in locks:
356 bb.utils.unlockfile(lock)
357
358sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
359sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
360
361def sstate_installpkg(ss, d):
362 from oe.gpg_sign import get_signer
363
364 sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
365 d.setVar("SSTATE_CURRTASK", ss['task'])
366 sstatefetch = d.getVar('SSTATE_PKGNAME')
367 sstatepkg = d.getVar('SSTATE_PKG')
368 verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)
369
370 if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
371 pstaging_fetch(sstatefetch, d)
372
373 if not os.path.isfile(sstatepkg):
374 bb.note("Sstate package %s does not exist" % sstatepkg)
375 return False
376
377 sstate_clean(ss, d)
378
379 d.setVar('SSTATE_INSTDIR', sstateinst)
380
381 if verify_sig:
382 if not os.path.isfile(sstatepkg + '.sig'):
383 bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
384 return False
385 signer = get_signer(d, 'local')
386 if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
387 bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
388 return False
389
390 # Empty the sstateinst directory, ensure it's clean
391 if os.path.exists(sstateinst):
392 oe.path.remove(sstateinst)
393 bb.utils.mkdirhier(sstateinst)
394
395 sstateinst = d.getVar("SSTATE_INSTDIR")
396 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
397
398 for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
399 # All hooks should run in the SSTATE_INSTDIR
400 bb.build.exec_func(f, d, (sstateinst,))
401
402 return sstate_installpkgdir(ss, d)
403
404def sstate_installpkgdir(ss, d):
405 import oe.path
406 import subprocess
407
408 sstateinst = d.getVar("SSTATE_INSTDIR")
409 d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])
410
411 for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
412 # All hooks should run in the SSTATE_INSTDIR
413 bb.build.exec_func(f, d, (sstateinst,))
414
415 def prepdir(dir):
416 # remove dir if it exists, ensure any parent directories do exist
417 if os.path.exists(dir):
418 oe.path.remove(dir)
419 bb.utils.mkdirhier(dir)
420 oe.path.remove(dir)
421
422 for state in ss['dirs']:
423 prepdir(state[1])
424 bb.utils.rename(sstateinst + state[0], state[1])
425 sstate_install(ss, d)
426
427 for plain in ss['plaindirs']:
428 workdir = d.getVar('WORKDIR')
429 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
430 src = sstateinst + "/" + plain.replace(workdir, '')
431 if sharedworkdir in plain:
432 src = sstateinst + "/" + plain.replace(sharedworkdir, '')
433 dest = plain
434 bb.utils.mkdirhier(src)
435 prepdir(dest)
436 bb.utils.rename(src, dest)
437
438 return True
439
440python sstate_hardcode_path_unpack () {
441 # Fixup hardcoded paths
442 #
443 # Note: The logic below must match the reverse logic in
444 # sstate_hardcode_path(d)
445 import subprocess
446
447 sstateinst = d.getVar('SSTATE_INSTDIR')
448 sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
449 fixmefn = sstateinst + "fixmepath"
450 if os.path.isfile(fixmefn):
451 staging_target = d.getVar('RECIPE_SYSROOT')
452 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
453
454 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
455 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
456 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
457 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
458 else:
459 sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)
460
461 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
462 for fixmevar in extra_staging_fixmes.split():
463 fixme_path = d.getVar(fixmevar)
464 sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)
465
466 # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
467 sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)
468
469 # Defer do_populate_sysroot relocation command
470 if sstatefixmedir:
471 bb.utils.mkdirhier(sstatefixmedir)
472 with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
473 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
474 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
475 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
476 sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
477 f.write(sstate_hardcode_cmd)
478 bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
479 return
480
481 bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
482 subprocess.check_call(sstate_hardcode_cmd, shell=True)
483
484 # Need to remove this or we'd copy it into the target directory and may
485 # conflict with another writer
486 os.remove(fixmefn)
487}
488
489def sstate_clean_cachefile(ss, d):
490 import oe.path
491
492 if d.getVarFlag('do_%s' % ss['task'], 'task'):
493 d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
494 sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
495 bb.note("Removing %s" % sstatepkgfile)
496 oe.path.remove(sstatepkgfile)
497
498def sstate_clean_cachefiles(d):
499 for task in (d.getVar('SSTATETASKS') or "").split():
500 ld = d.createCopy()
501 ss = sstate_state_fromvars(ld, task)
502 sstate_clean_cachefile(ss, ld)
503
504def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
505 import oe.path
506
507 mfile = open(manifest)
508 entries = mfile.readlines()
509 mfile.close()
510
511 for entry in entries:
512 entry = entry.strip()
513 if prefix and not entry.startswith("/"):
514 entry = prefix + "/" + entry
515 bb.debug(2, "Removing manifest: %s" % entry)
516 # We can race against another package populating directories as we're removing them
517 # so we ignore errors here.
518 try:
519 if entry.endswith("/"):
520 if os.path.islink(entry[:-1]):
521 os.remove(entry[:-1])
522 elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
523 # Removing directories whilst builds are in progress exposes a race. Only
524 # do it in contexts where it is safe to do so.
525 os.rmdir(entry[:-1])
526 else:
527 os.remove(entry)
528 except OSError:
529 pass
530
531 postrm = manifest + ".postrm"
532 if os.path.exists(manifest + ".postrm"):
533 import subprocess
534 os.chmod(postrm, 0o755)
535 subprocess.check_call(postrm, shell=True)
536 oe.path.remove(postrm)
537
538 oe.path.remove(manifest)
539
540def sstate_clean(ss, d):
541 import oe.path
542 import glob
543
544 d2 = d.createCopy()
545 stamp_clean = d.getVar("STAMPCLEAN")
546 extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
547 if extrainf:
548 d2.setVar("SSTATE_MANMACH", extrainf)
549 wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
550 else:
551 wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])
552
553 manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])
554
555 if os.path.exists(manifest):
556 locks = []
557 for lock in ss['lockfiles-shared']:
558 locks.append(bb.utils.lockfile(lock))
559 for lock in ss['lockfiles']:
560 locks.append(bb.utils.lockfile(lock))
561
562 sstate_clean_manifest(manifest, d, canrace=True)
563
564 for lock in locks:
565 bb.utils.unlockfile(lock)
566
567 # Remove the current and previous stamps, but keep the sigdata.
568 #
569 # The glob() matches do_task* which may match multiple tasks, for
570 # example: do_package and do_package_write_ipk, so we need to
571 # exactly match *.do_task.* and *.do_task_setscene.*
572 rm_stamp = '.do_%s.' % ss['task']
573 rm_setscene = '.do_%s_setscene.' % ss['task']
574 # For BB_SIGNATURE_HANDLER = "noop"
575 rm_nohash = ".do_%s" % ss['task']
576 for stfile in glob.glob(wildcard_stfile):
577 # Keep the sigdata
578 if ".sigdata." in stfile or ".sigbasedata." in stfile:
579 continue
580 # Preserve taint files in the stamps directory
581 if stfile.endswith('.taint'):
582 continue
583 if rm_stamp in stfile or rm_setscene in stfile or \
584 stfile.endswith(rm_nohash):
585 oe.path.remove(stfile)
586
587sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
588
589CLEANFUNCS += "sstate_cleanall"
590
591python sstate_cleanall() {
592 bb.note("Removing shared state for package %s" % d.getVar('PN'))
593
594 manifest_dir = d.getVar('SSTATE_MANIFESTS')
595 if not os.path.exists(manifest_dir):
596 return
597
598 tasks = d.getVar('SSTATETASKS').split()
599 for name in tasks:
600 ld = d.createCopy()
601 shared_state = sstate_state_fromvars(ld, name)
602 sstate_clean(shared_state, ld)
603}
604
605python sstate_hardcode_path () {
606 import subprocess, platform
607
608 # Need to remove hardcoded paths and fix these when we install the
609 # staging packages.
610 #
611 # Note: the logic in this function needs to match the reverse logic
612 # in sstate_installpkg(ss, d)
613
614 staging_target = d.getVar('RECIPE_SYSROOT')
615 staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
616 sstate_builddir = d.getVar('SSTATE_BUILDDIR')
617
618 sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
619 if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
620 sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
621 elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
622 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
623 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
624 else:
625 sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
626 sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
627
628 extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
629 for fixmevar in extra_staging_fixmes.split():
630 fixme_path = d.getVar(fixmevar)
631 sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
632 sstate_grep_cmd += " -e '%s'" % (fixme_path)
633
634 fixmefn = sstate_builddir + "fixmepath"
635
636 sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
637 sstate_filelist_cmd = "tee %s" % (fixmefn)
638
639 # fixmepath file needs relative paths, drop sstate_builddir prefix
640 sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)
641
642 xargs_no_empty_run_cmd = '--no-run-if-empty'
643 if platform.system() == 'Darwin':
644 xargs_no_empty_run_cmd = ''
645
646 # Limit the fixpaths and sed operations based on the initial grep search
647 # This has the side effect of making sure the vfs cache is hot
648 sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)
649
650 bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
651 subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)
652
653 # If the fixmefn is empty, remove it.
654 if os.stat(fixmefn).st_size == 0:
655 os.remove(fixmefn)
656 else:
657 bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
658 subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
659}
660
661def sstate_package(ss, d):
662 import oe.path
663 import time
664
665 tmpdir = d.getVar('TMPDIR')
666
667 fixtime = False
668 if ss['task'] == "package":
669 fixtime = True
670
671 def fixtimestamp(root, path):
672 f = os.path.join(root, path)
673 if os.lstat(f).st_mtime > sde:
674 os.utime(f, (sde, sde), follow_symlinks=False)
675
676 sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
677 sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
678 d.setVar("SSTATE_CURRTASK", ss['task'])
679 bb.utils.remove(sstatebuild, recurse=True)
680 bb.utils.mkdirhier(sstatebuild)
681 for state in ss['dirs']:
682 if not os.path.exists(state[1]):
683 continue
684 srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
685 # Find absolute symlinks and error out on them. We could attempt to relocate but it's not
686 # clear what the symlink is relative to in this context. We could add that markup
687 # to sstate tasks but there aren't many of these, so better to just avoid them entirely.
688 for walkroot, dirs, files in os.walk(state[1]):
689 for file in files + dirs:
690 if fixtime:
691 fixtimestamp(walkroot, file)
692 srcpath = os.path.join(walkroot, file)
693 if not os.path.islink(srcpath):
694 continue
695 link = os.readlink(srcpath)
696 if not os.path.isabs(link):
697 continue
698 if not link.startswith(tmpdir):
699 continue
700 bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
701 bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
702 bb.utils.rename(state[1], sstatebuild + state[0])
703
704 workdir = d.getVar('WORKDIR')
705 sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
706 for plain in ss['plaindirs']:
707 pdir = plain.replace(workdir, sstatebuild)
708 if sharedworkdir in plain:
709 pdir = plain.replace(sharedworkdir, sstatebuild)
710 bb.utils.mkdirhier(plain)
711 bb.utils.mkdirhier(pdir)
712 bb.utils.rename(plain, pdir)
713 if fixtime:
714 fixtimestamp(pdir, "")
715 for walkroot, dirs, files in os.walk(pdir):
716 for file in files + dirs:
717 fixtimestamp(walkroot, file)
718
719 d.setVar('SSTATE_BUILDDIR', sstatebuild)
720 d.setVar('SSTATE_INSTDIR', sstatebuild)
721
722 if d.getVar('SSTATE_SKIP_CREATION') == '1':
723 return
724
725 sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
726 if d.getVar('SSTATE_SIG_KEY'):
727 sstate_create_package.append('sstate_sign_package')
728
729 for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
730 sstate_create_package + \
731 (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
732 # All hooks should run in SSTATE_BUILDDIR.
733 bb.build.exec_func(f, d, (sstatebuild,))
734
735 # SSTATE_PKG may have been changed by sstate_report_unihash
736 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
737 if not os.path.exists(siginfo):
738 bb.siggen.dump_this_task(siginfo, d)
739 else:
740 try:
741 os.utime(siginfo, None)
742 except PermissionError:
743 pass
744 except OSError as e:
745 # Handle read-only file systems gracefully
746 import errno
747 if e.errno != errno.EROFS:
748 raise e
749
750 return
751
752sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
753
754def pstaging_fetch(sstatefetch, d):
755 import bb.fetch2
756
757 # Only try and fetch if the user has configured a mirror
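    # An sstate mirror is typically configured in local.conf; an illustrative (not
    # normative) example, where PATH is substituted with the object's relative path by
    # the mirror mapping:
    #   SSTATE_MIRRORS ?= "file://.* https://example.com/sstate/PATH;downloadfilename=PATH"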
758 mirrors = d.getVar('SSTATE_MIRRORS')
759 if not mirrors:
760 return
761
762 # Copy the data object and override DL_DIR and SRC_URI
763 localdata = bb.data.createCopy(d)
764
765 dldir = localdata.expand("${SSTATE_DIR}")
766 bb.utils.mkdirhier(dldir)
767
768 localdata.delVar('MIRRORS')
769 localdata.setVar('FILESPATH', dldir)
770 localdata.setVar('DL_DIR', dldir)
771 localdata.setVar('PREMIRRORS', mirrors)
772 localdata.setVar('SRCPV', d.getVar('SRCPV'))
773
774 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
775 # we'll want to allow network access for the current set of fetches.
776 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
777 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
778 localdata.delVar('BB_NO_NETWORK')
779
780 # Try a fetch from the sstate mirror, if it fails just return and
781 # we will build the package
782 uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
783 'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
784 if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
785 uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]
786
787 for srcuri in uris:
788 localdata.delVar('SRC_URI')
789 localdata.setVar('SRC_URI', srcuri)
790 try:
791 fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
792 fetcher.checkstatus()
793 fetcher.download()
794
795 except bb.fetch2.BBFetchException:
796 pass
797
798pstaging_fetch[vardepsexclude] += "SRCPV"
799
800
801def sstate_setscene(d):
802 shared_state = sstate_state_fromvars(d)
803 accelerate = sstate_installpkg(shared_state, d)
804 if not accelerate:
805 msg = "No sstate archive obtainable, will run full task instead."
806 bb.warn(msg)
807 raise bb.BBHandledException(msg)
808
809python sstate_task_prefunc () {
810 shared_state = sstate_state_fromvars(d)
811 sstate_clean(shared_state, d)
812}
813sstate_task_prefunc[dirs] = "${WORKDIR}"
814
815python sstate_task_postfunc () {
816 shared_state = sstate_state_fromvars(d)
817
818 for intercept in shared_state['interceptfuncs']:
819 bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))
820
821 omask = os.umask(0o002)
822 if omask != 0o002:
823 bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
824 sstate_package(shared_state, d)
825 os.umask(omask)
826
827 sstateinst = d.getVar("SSTATE_INSTDIR")
828 d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])
829
830 sstate_installpkgdir(shared_state, d)
831
832 bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
833}
834sstate_task_postfunc[dirs] = "${WORKDIR}"
835
836
837#
838# Shell function to generate an sstate package from a directory
839# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
840#
841sstate_create_package () {
842 # Exit early if it already exists
843 if [ -e ${SSTATE_PKG} ]; then
844 touch ${SSTATE_PKG} 2>/dev/null || true
845 return
846 fi
847
848 mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
849 TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
850
851 OPT="-cS"
852 ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
853 # Use pzstd if available
854 if [ -x "$(command -v pzstd)" ]; then
855 ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
856 fi
857
858 # Need to handle empty directories
859 if [ "$(ls -A)" ]; then
860 set +e
861 tar -I "$ZSTD" $OPT -f $TFILE *
862 ret=$?
863 if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
864 exit 1
865 fi
866 set -e
867 else
868 tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
869 fi
870 chmod 0664 $TFILE
871 # Skip if it was already created by some other process
872 if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
873 # There is a symbolic link, but it links to nothing.
874 # Forcefully replace it with the new file.
875 ln -f $TFILE ${SSTATE_PKG} || true
876 elif [ ! -e ${SSTATE_PKG} ]; then
877 # Move into place using ln to attempt an atomic op.
878 # Abort if it already exists
879 ln $TFILE ${SSTATE_PKG} || true
880 else
881 touch ${SSTATE_PKG} 2>/dev/null || true
882 fi
883 rm $TFILE
884}
885
886python sstate_sign_package () {
887 from oe.gpg_sign import get_signer
888
889
890 signer = get_signer(d, 'local')
891 sstate_pkg = d.getVar('SSTATE_PKG')
892 if os.path.exists(sstate_pkg + '.sig'):
893 os.unlink(sstate_pkg + '.sig')
894 signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
895 d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
896}
897
898python sstate_report_unihash() {
899 report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)
900
901 if report_unihash:
902 ss = sstate_state_fromvars(d)
903 report_unihash(os.getcwd(), ss['task'], d)
904}
905
906#
907# Shell function to decompress and prepare a package for installation
908# Will be run from within SSTATE_INSTDIR.
909#
910sstate_unpack_package () {
911 ZSTD="zstd -T${ZSTD_THREADS}"
912 # Use pzstd if available
913 if [ -x "$(command -v pzstd)" ]; then
914 ZSTD="pzstd -p ${ZSTD_THREADS}"
915 fi
916
917 tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
918 # update .siginfo atime on local/NFS mirror if it is a symbolic link
919 [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
920 # update each symbolic link instead of any referenced file
921 touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
922 [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
923 [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
924}
925
926BB_HASHCHECK_FUNCTION = "sstate_checkhashes"
927
928def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
929 import itertools
930
931 found = set()
932 missed = set()
933
934 def gethash(task):
935 return sq_data['unihash'][task]
936
937 def getpathcomponents(task, d):
938 # Magic data from BB_HASHFILENAME
939 splithashfn = sq_data['hashfn'][task].split(" ")
940 spec = splithashfn[1]
941 if splithashfn[0] == "True":
942 extrapath = d.getVar("NATIVELSBSTRING") + "/"
943 else:
944 extrapath = ""
945
946 tname = bb.runqueue.taskname_from_tid(task)[3:]
947
948 if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
949 spec = splithashfn[2]
950 extrapath = ""
951
952 return spec, extrapath, tname
953
954 def getsstatefile(tid, siginfo, d):
955 spec, extrapath, tname = getpathcomponents(tid, d)
956 return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)
957
958 for tid in sq_data['hash']:
959
960 sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))
961
962 if os.path.exists(sstatefile):
963 found.add(tid)
964 bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
965 else:
966 missed.add(tid)
967 bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)
968
969 foundLocal = len(found)
970 mirrors = d.getVar("SSTATE_MIRRORS")
971 if mirrors:
972 # Copy the data object and override DL_DIR and SRC_URI
973 localdata = bb.data.createCopy(d)
974
975 dldir = localdata.expand("${SSTATE_DIR}")
976 localdata.delVar('MIRRORS')
977 localdata.setVar('FILESPATH', dldir)
978 localdata.setVar('DL_DIR', dldir)
979 localdata.setVar('PREMIRRORS', mirrors)
980
981 bb.debug(2, "SState using premirror of: %s" % mirrors)
982
983 # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
984 # we'll want to allow network access for the current set of fetches.
985 if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
986 bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
987 localdata.delVar('BB_NO_NETWORK')
988
989 from bb.fetch2 import FetchConnectionCache
990 def checkstatus_init():
991 while not connection_cache_pool.full():
992 connection_cache_pool.put(FetchConnectionCache())
993
994 def checkstatus_end():
995 while not connection_cache_pool.empty():
996 connection_cache = connection_cache_pool.get()
997 connection_cache.close_connections()
998
999 def checkstatus(arg):
1000 (tid, sstatefile) = arg
1001
1002 connection_cache = connection_cache_pool.get()
1003 localdata2 = bb.data.createCopy(localdata)
1004 srcuri = "file://" + sstatefile
1005 localdata2.setVar('SRC_URI', srcuri)
1006 bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
1007
1008 import traceback
1009
1010 try:
1011 fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
1012 connection_cache=connection_cache)
1013 fetcher.checkstatus()
1014 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
1015 found.add(tid)
1016 missed.remove(tid)
1017 except bb.fetch2.FetchError as e:
1018 bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
1019 except Exception as e:
1020 bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))
1021
1022 connection_cache_pool.put(connection_cache)
1023
1024 if progress:
1025 bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
1026 bb.event.check_for_interrupts(d)
1027
1028 tasklist = []
1029 for tid in missed:
1030 sstatefile = d.expand(getsstatefile(tid, siginfo, d))
1031 tasklist.append((tid, sstatefile))
1032
1033 if tasklist:
1034 nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))
1035
1036 ## thread-safe counter
1037 cnt_tasks_done = itertools.count(start = 1)
1038 progress = len(tasklist) >= 100
1039 if progress:
1040 msg = "Checking sstate mirror object availability"
1041 bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)
1042
1043 # Have to setup the fetcher environment here rather than in each thread as it would race
1044 fetcherenv = bb.fetch2.get_fetcher_environment(d)
1045 with bb.utils.environment(**fetcherenv):
1046 bb.event.enable_threadlock()
1047 import concurrent.futures
1048 from queue import Queue
1049 connection_cache_pool = Queue(nproc)
1050 checkstatus_init()
1051 with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
1052 executor.map(checkstatus, tasklist.copy())
1053 checkstatus_end()
1054 bb.event.disable_threadlock()
1055
1056 if progress:
1057 bb.event.fire(bb.event.ProcessFinished(msg), d)
1058
1059 inheritlist = d.getVar("INHERIT")
1060 if "toaster" in inheritlist:
1061 evdata = {'missed': [], 'found': []};
1062 for tid in missed:
1063 sstatefile = d.expand(getsstatefile(tid, False, d))
1064 evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1065 for tid in found:
1066 sstatefile = d.expand(getsstatefile(tid, False, d))
1067 evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
1068 bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)
1069
1070 if summary:
1071 # Print some summary statistics about the current task completion and how much sstate
1072 # reuse there was. Avoid divide by zero errors.
1073 total = len(sq_data['hash'])
1074 complete = 0
1075 if currentcount:
1076 complete = (len(found) + currentcount) / (total + currentcount) * 100
1077 match = 0
1078 if total:
1079 match = len(found) / total * 100
1080 bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
1081 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))
1082
1083 if hasattr(bb.parse.siggen, "checkhashes"):
1084 bb.parse.siggen.checkhashes(sq_data, missed, found, d)
1085
1086 return found
1087setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"
1088
1089BB_SETSCENE_DEPVALID = "setscene_depvalid"
1090
1091def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
1092 # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
1093 # task is included in taskdependees too
1094 # Return - False - We need this dependency
1095 # - True - We can skip this dependency
1096 import re
1097
1098 def logit(msg, log):
1099 if log is not None:
1100 log.append(msg)
1101 else:
1102 bb.debug(2, msg)
1103
1104 logit("Considering setscene task: %s" % (str(taskdependees[task])), log)
1105
1106 directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]
1107
1108 def isNativeCross(x):
1109 return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")
1110
1111 # We only need to trigger deploy_source_date_epoch through direct dependencies
1112 if taskdependees[task][1] in directtasks:
1113 return True
1114
1115 # We only need to trigger packagedata through direct dependencies
1116 # but need to preserve packagedata on packagedata links
1117 if taskdependees[task][1] == "do_packagedata":
1118 for dep in taskdependees:
1119 if taskdependees[dep][1] == "do_packagedata":
1120 return False
1121 return True
1122
1123 for dep in taskdependees:
1124 logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
1125 if task == dep:
1126 continue
1127 if dep in notneeded:
1128 continue
1129 # do_package_write_* and do_package doesn't need do_package
1130 if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
1131 continue
1132 # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
1133 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
1134 return False
1135 # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
1136 if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
1137 continue
1138 # Native/Cross packages don't exist and are noexec anyway
1139 if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
1140 continue
1141
1142 # This is due to the [depends] in useradd.bbclass complicating matters
1143 # The logic *is* reversed here due to the way hard setscene dependencies are injected
1144 if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
1145 continue
1146
1147 # Consider sysroot depending on sysroot tasks
1148 if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
1149 # Allow excluding certain recursive dependencies. If a recipe needs it should add a
1150 # specific dependency itself, rather than relying on one of its dependees to pull
1151 # them in.
1152 # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
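            # Entries take the form "<dependent regex>-><dependency regex>"; an illustrative
            # (hypothetical) example, meaning no recipe needs foo-helper-native's sysroot
            # pulled in recursively:
            #   SSTATE_EXCLUDEDEPS_SYSROOT += ".*->foo-helper-native"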
1153 not_needed = False
1154 excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
1155 if excludedeps is None:
1156 # Cache the regular expressions for speed
1157 excludedeps = []
1158 for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
1159 excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
1160 d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
1161 for excl in excludedeps:
1162 if excl[0].match(taskdependees[dep][0]):
1163 if excl[1].match(taskdependees[task][0]):
1164 not_needed = True
1165 break
1166 if not_needed:
1167 continue
1168 # For meta-extsdk-toolchain we want all sysroot dependencies
1169 if taskdependees[dep][0] == 'meta-extsdk-toolchain':
1170 return False
1171 # Native/Cross populate_sysroot need their dependencies
1172 if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
1173 return False
1174 # Target populate_sysroot depended on by cross tools need to be installed
1175 if isNativeCross(taskdependees[dep][0]):
1176 return False
1177 # Native/cross tools depended upon by target sysroot are not needed
1178 # Add an exception for shadow-native as required by useradd.bbclass
1179 if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
1180 continue
1181 # Target populate_sysroot need their dependencies
1182 return False
1183
1184 if taskdependees[dep][1] in directtasks:
1185 continue
1186
1187 # Safe fallthrough default
1188 logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
1189 return False
1190 return True
1191
1192addhandler sstate_eventhandler
1193sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
1194python sstate_eventhandler() {
1195 d = e.data
1196 writtensstate = d.getVar('SSTATE_CURRTASK')
1197 if not writtensstate:
1198 taskname = d.getVar("BB_RUNTASK")[3:]
1199 spec = d.getVar('SSTATE_PKGSPEC')
1200 swspec = d.getVar('SSTATE_SWSPEC')
1201 if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
1202 d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
1203 d.setVar("SSTATE_EXTRAPATH", "")
1204 d.setVar("SSTATE_CURRTASK", taskname)
1205 siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
1206 if not os.path.exists(siginfo):
1207 bb.siggen.dump_this_task(siginfo, d)
1208 else:
1209 try:
1210 os.utime(siginfo, None)
1211 except PermissionError:
1212 pass
1213 except OSError as e:
1214 # Handle read-only file systems gracefully
1215 import errno
1216 if e.errno != errno.EROFS:
1217 raise e
1218
1219}
1220
1221SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"
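# Set to "0" (e.g. in local.conf) to keep the workdirs of no-longer-reachable recipes
# instead of deleting them in the handler below.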
1222
1223#
1224# Event handler which removes manifests and stamp files for recipes which are no
1225# longer 'reachable' in a build where they once were. 'Reachable' refers to
1226# whether a recipe is parsed so recipes in a layer which was removed would no
1227# longer be reachable. Switching between systemd and sysvinit where recipes
1228# became skipped would be another example.
1229#
1230# Also optionally removes the workdir of those tasks/recipes
1231#
1232addhandler sstate_eventhandler_reachablestamps
1233sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
1234python sstate_eventhandler_reachablestamps() {
1235 import glob
1236 d = e.data
1237 stamps = e.stamps.values()
1238 removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
1239 preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
1240 preservestamps = []
1241 if os.path.exists(preservestampfile):
1242 with open(preservestampfile, 'r') as f:
1243 preservestamps = f.readlines()
1244 seen = []
1245
1246 # The machine index contains all the stamps this machine has ever seen in this build directory.
1247 # We should only remove things which this machine once accessed but no longer does.
1248 machineindex = set()
1249 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1250 mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
1251 if os.path.exists(mi):
1252 with open(mi, "r") as f:
1253 machineindex = set(line.strip() for line in f.readlines())
1254
1255 for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
1256 toremove = []
1257 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1258 if not os.path.exists(i):
1259 continue
1260 manseen = set()
1261 ignore = []
1262 with open(i, "r") as f:
1263 lines = f.readlines()
1264 for l in reversed(lines):
1265 try:
1266 (stamp, manifest, workdir) = l.split()
1267 # The index may have multiple entries for the same manifest as the code above only appends
1268 # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
1269 # The last entry in the list is the valid one, any earlier entries with matching manifests
1270 # should be ignored.
1271 if manifest in manseen:
1272 ignore.append(l)
1273 continue
1274 manseen.add(manifest)
1275 if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
1276 toremove.append(l)
1277 if stamp not in seen:
1278 bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
1279 seen.append(stamp)
1280 except ValueError:
1281 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1282
1283 if toremove:
1284 msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
1285 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1286
1287 removed = 0
1288 for r in toremove:
1289 (stamp, manifest, workdir) = r.split()
1290 for m in glob.glob(manifest + ".*"):
1291 if m.endswith(".postrm"):
1292 continue
1293 sstate_clean_manifest(m, d)
1294 bb.utils.remove(stamp + "*")
1295 if removeworkdir:
1296 bb.utils.remove(workdir, recurse = True)
1297 lines.remove(r)
1298 removed = removed + 1
1299 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1300 bb.event.check_for_interrupts(d)
1301
1302 bb.event.fire(bb.event.ProcessFinished(msg), d)
1303
1304 with open(i, "w") as f:
1305 for l in lines:
1306 if l in ignore:
1307 continue
1308 f.write(l)
1309 machineindex |= set(stamps)
1310 with open(mi, "w") as f:
1311 for l in machineindex:
1312 f.write(l + "\n")
1313
1314 if preservestamps:
1315 os.remove(preservestampfile)
1316}
1317
1318
1319#
1320# Bitbake can generate an event showing which setscene tasks are 'stale',
1321# i.e. which ones will be rerun. These are ones where a stamp file is present but
1322# it is stale (e.g. taskhash doesn't match). With that list we can go through
1323# the manifests for matching tasks and "uninstall" those manifests now. We do
1324# this now rather than mid build since the distribution of files between sstate
1325# objects may have changed, new tasks may run first and if those new tasks overlap
1326# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
1327# removing these files is fast.
1328#
1329addhandler sstate_eventhandler_stalesstate
1330sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
1331python sstate_eventhandler_stalesstate() {
1332 d = e.data
1333 tasks = e.tasks
1334
1335 bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
1336
1337 for a in list(set(d.getVar("SSTATE_ARCHS").split())):
1338 toremove = []
1339 i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
1340 if not os.path.exists(i):
1341 continue
1342 with open(i, "r") as f:
1343 lines = f.readlines()
1344 for l in lines:
1345 try:
1346 (stamp, manifest, workdir) = l.split()
1347 for tid in tasks:
1348 for s in tasks[tid]:
1349 if s.startswith(stamp):
1350 taskname = bb.runqueue.taskname_from_tid(tid)[3:]
1351 manname = manifest + "." + taskname
1352 if os.path.exists(manname):
1353 bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
1354 toremove.append((manname, tid, tasks[tid]))
1355 break
1356 except ValueError:
1357 bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))
1358
1359 if toremove:
1360 msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
1361 bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)
1362
1363 removed = 0
1364 for (manname, tid, stamps) in toremove:
1365 sstate_clean_manifest(manname, d)
1366 for stamp in stamps:
1367 bb.utils.remove(stamp)
1368 removed = removed + 1
1369 bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
1370 bb.event.check_for_interrupts(d)
1371
1372 bb.event.fire(bb.event.ProcessFinished(msg), d)
1373}