#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

# Bump this whenever the layout/contents of sstate archives changes in an
# incompatible way; it is embedded in every package name so old archives
# are simply never matched again.
SSTATE_VERSION = "11"

# zstd compression level used when creating sstate archives
SSTATE_ZSTD_CLEVEL ??= "8"

# Directory holding the per-task manifests of files each sstate package installed
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    """Map an sstate spec/hash/task onto the archive path under SSTATE_DIR.

    Returns "<h0h1>/<h2h3>/<spec><hash>_<taskname><ext>" where the two
    leading components shard packages by the first four hash characters.
    Returns "" when taskname is None; an empty/None hash is replaced with
    the literal "INVALID".

    If the resulting filename would exceed the 254-character limit common
    to most filesystems, the three informational spec fields (2,3,4) are
    truncated evenly to make it fit; failure to fit is fatal.
    """
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            # Fixed typo: "chararacters" -> "characters"
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn
40
# Architecture component of package names/paths; overridden per recipe class
# (native/cross/etc.) by the anonymous python below.
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
# Full colon-separated spec consumed by generate_sstatefn(); field order is
# significant (fields 2,3,4 may be truncated for long names).
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
# Software-only spec (no machine/arch fields), used e.g. for populate_lic
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
# Glob used when removing cached archives for a task
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"

# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
# Paths listed here may legitimately be installed by more than one recipe
# without triggering the overlap check in sstate_install().
# Avoid docbook/sgml catalog warnings for now
SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps that allarch is disabled when multilib is used
SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"

# File patterns scanned for hardcoded sysroot paths that need relocation
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
# task:fileglob:replacement triples used by hash equivalence to normalise
# environment-dependent content out of output hashes
SSTATE_HASHEQUIV_FILEMAP ?= " \
    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
    populate_sysroot:*/crossscripts/*:${TMPDIR} \
    populate_sysroot:*/crossscripts/*:${COREBASE} \
    "

BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

SSTATE_ARCHS_TUNEPKG ??= "${TUNE_PKGARCH}"
# Every architecture value an sstate package may be stored under; used when
# searching mirrors/cleaning. The host distro string is deliberately excluded
# from signatures (see vardepsexclude below).
SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX} \
    allarch \
    ${SSTATE_ARCHS_TUNEPKG} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"
# Hook lists run at the various stages of sstate package creation/installation.
SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
# Extra variables whose values are substituted out of/back into staged files
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnUPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
# List of signatures to consider valid.
SSTATE_VALID_SIGS ??= ""
SSTATE_VALID_SIGS[vardepvalue] = ""

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "
python () {
    # Pick the SSTATE_PKGARCH matching the recipe's class so packages built
    # for different host/target combinations never collide. Order matters:
    # crosssdk must be tested before cross (it inherits from it).
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        # Target recipes: manifests are tracked per machine architecture
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    # Build-host-dependent output additionally keyed on the host distro string
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    # Wire the sstate pre/post hooks onto every sstate-capable task and allow
    # network access for the setscene variants (mirror fetches).
    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}
168
def sstate_init(task, d):
    """Return a fresh shared-state descriptor for *task*.

    The datastore *d* is accepted for signature symmetry with the other
    sstate helpers but is not consulted here.
    """
    return {
        'task': task,
        'dirs': [],
        'plaindirs': [],
        'lockfiles': [],
        'lockfiles-shared': [],
    }
177
def sstate_state_fromvars(d, task = None):
    """Build the shared-state descriptor dict for *task* from the datastore.

    Reads the sstate-* varflags on "do_<task>" and returns the dict produced
    by sstate_init() populated with input/output dirs, lockfiles, intercept
    functions and the fixme dir. Fatal if run outside a task context or if
    the inputdirs/outputdirs lists differ in length.
    """
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
    # Normalise "do_foo_setscene"/"do_foo" down to the bare task name
    task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        # License data is software-only: use the arch-less spec and drop the
        # host-distro path component
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    ss['fixmedir'] = fixmedir
    return ss
211
def sstate_add(ss, source, dest, d):
    """Record a source->destination directory pair on the state dict *ss*.

    A trailing slash is ensured on each path and then stripped again by
    normpath, so both entries end up normalised without a trailing slash.
    The basename of the normalised source is stored alongside the pair.
    Returns *ss* for convenience; *d* is unused.
    """
    src = source if source.endswith("/") else source + "/"
    dst = dest if dest.endswith("/") else dest + "/"
    src = os.path.normpath(src)
    dst = os.path.normpath(dst)
    ss['dirs'].append([os.path.basename(src), src, dst])
    return ss
222
def sstate_install(ss, d):
    """Stage the task's output directories into their shared destinations.

    Walks each (source, dest) pair in ss['dirs'], checks for conflicts with
    files other recipes already installed (except SSTATE_ALLOW_OVERLAP_FILES),
    writes a manifest of everything installed, records the manifest in the
    per-arch index, then hardlink-copies the trees into place and runs the
    SSTATEPOSTINSTFUNCS hooks. Fatal on conflict or if a manifest already
    exists for this task.
    """
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    # A readable manifest means this task's output was already staged
    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    # Take shared locks first, then exclusive ones
    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    # Collect the destination paths of every file/dir we are about to install
    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    # Symlinked dirs are tracked as files (removed, not rmdir'd)
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                # Identify which other task's manifest claims this file
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=index-* | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append("  (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append("  (not matched to any task)")
    if match:
        bb.fatal("Recipe %s is trying to install files into a shared " \
          "area when those files already exist. The files and the manifests listing " \
          "them are:\n  %s\n"
          "Please adjust the recipes so only one recipe provides a given file. " % \
          (d.getVar('PN'), "\n  ".join(match)))

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries, we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)
338
# Fixed typo: "STATE_MANMACH" -> "SSTATE_MANMACH" so the intended variable is
# actually excluded from the task signature.
sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
341
def sstate_installpkg(ss, d):
    """Fetch, verify and unpack the sstate archive for this task.

    Returns True on success (task accelerated) or False when no usable
    archive is available, in which case the real task must run. Signature
    verification is only performed when SSTATE_VERIFY_SIG is enabled.
    """
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')
    verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)

    # Try the mirrors if the archive (or its required .sig) is missing locally
    if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if verify_sig:
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty sstateinst directory, ensure its clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)
384
def sstate_installpkgdir(ss, d):
    """Move an already-unpacked sstate tree from SSTATE_INSTDIR into place.

    Runs the SSTATEPOSTUNPACKFUNCS hooks (path relocation), renames each
    unpacked directory to its real location, then stages via sstate_install().
    Plain dirs (no install step) are moved back under WORKDIR/work-shared.
    Always returns True.
    """
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        # The unpacked copy lives under sstateinst at the path relative to
        # WORKDIR (or work-shared for shared trees)
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    return True
420
python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    # fixmepath lists (relative) files containing FIXMESTAGINGDIR* markers
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        # Which markers apply depends on the recipe class (same split as in
        # sstate_hardcode_path)
        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                # Store a templated command so the final sysroot location can
                # be substituted in later (FIXMEFINAL* placeholders)
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}
469
def sstate_clean_cachefile(ss, d):
    """Delete the cached sstate archives matching this task's path spec.

    Only acts when "do_<task>" is a real task in this datastore.
    """
    import oe.path

    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        # SSTATE_PATHSPEC is a glob covering all archs/hashes for this task
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)
478
def sstate_clean_cachefiles(d):
    """Remove the cached sstate archives for every sstate-capable task."""
    tasks = (d.getVar('SSTATETASKS') or "").split()
    for taskname in tasks:
        # Use a copy so per-task variable tweaks don't leak into d
        localdata = d.createCopy()
        shared_state = sstate_state_fromvars(localdata, taskname)
        sstate_clean_cachefile(shared_state, localdata)
484
def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    """Remove every file/dir listed in *manifest*, then the manifest itself.

    Entries ending in "/" are directories and are only rmdir'd when empty
    and when canrace is False (concurrent builds may be repopulating them).
    Relative entries are resolved against *prefix* if given. A sibling
    "<manifest>.postrm" script, if present, is executed and removed.
    OSError during removal is deliberately ignored (best-effort cleanup).
    """
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)
520
def sstate_clean(ss, d):
    """Undo a previous sstate install for this task.

    Removes the files recorded in the task's manifest (under the task's
    lockfiles) and deletes the task's stamps, keeping sigdata and taint
    files so signatures/taint state survive.
    """
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        # Machine-specific stamps carry the extra info suffix
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
569
# Run when the recipe is cleaned (do_clean)
CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    # Remove the staged output and stamps of every sstate task of this recipe
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        # Copy the datastore so per-task tweaks don't leak between tasks
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}
585
python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    # Replace absolute sysroot paths with FIXMESTAGINGDIR* placeholders;
    # which paths are relevant depends on the recipe class
    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    # Record which files were edited so unpack can reverse the substitution
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        # BSD xargs has no --no-run-if-empty (it is the default behaviour)
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
641
def sstate_package(ss, d):
    """Assemble the task's output into SSTATE_BUILDDIR and create the archive.

    Moves each output tree under ${WORKDIR}/sstate-build-<task>/, normalises
    timestamps to SOURCE_DATE_EPOCH for the do_package task, errors on
    absolute symlinks pointing into TMPDIR, then runs the create/sign hook
    chain (SSTATECREATEFUNCS, sstate_create_package, SSTATEPOSTCREATEFUNCS)
    and ensures a .siginfo exists next to the archive.
    """
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    # Only do_package output gets its timestamps clamped for reproducibility
    fixtime = False
    if ss['task'] == "package":
        fixtime = True

    def fixtimestamp(root, path):
        # Clamp mtimes newer than SOURCE_DATE_EPOCH (closure over sde below)
        f = os.path.join(root, path)
        if os.lstat(f).st_mtime > sde:
            os.utime(f, (sde, sde), follow_symlinks=False)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but its not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                if fixtime:
                    fixtimestamp(walkroot, file)
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        # Plain dirs are archived at their path relative to WORKDIR (or
        # work-shared for shared trees)
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)
        if fixtime:
            fixtimestamp(pdir, "")
            for walkroot, dirs, files in os.walk(pdir):
                for file in files + dirs:
                    fixtimestamp(walkroot, file)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
    if d.getVar('SSTATE_SIG_KEY'):
        sstate_create_package.append('sstate_sign_package')

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return

sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
734
def pstaging_fetch(sstatefetch, d):
    """Try to download an sstate archive (plus .siginfo/.sig) from SSTATE_MIRRORS.

    Failures are silently ignored — the caller falls back to building the
    task. No-op when no mirrors are configured.
    """
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass
777
def sstate_setscene(d):
    """Setscene entry point: try to accelerate the current task from sstate.

    Raises bb.BBHandledException when no usable archive was found, so the
    real task runs instead.
    """
    ss = sstate_state_fromvars(d)
    if sstate_installpkg(ss, d):
        return
    message = "No sstate archive obtainable, will run full task instead."
    bb.warn(message)
    raise bb.BBHandledException(message)
785
python sstate_task_prefunc () {
    # Before an sstate task runs, clear out anything it previously installed
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"
791
python sstate_task_postfunc () {
    # After an sstate task runs: run intercepts, package the output into an
    # sstate archive, then install it back into the shared locations.
    shared_state = sstate_state_fromvars(d)

    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))

    # Package with group-writable permissions so shared caches work
    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"
812
813
#
# Shell function to generate a sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
# The archive is written to a temp file first and then published with a
# hardlink (ln) so that concurrent builders racing on the same SSTATE_PKG
# never see a partially written file.
#
sstate_create_package () {
	# Exit early if it already exists
	if [ -e ${SSTATE_PKG} ]; then
		touch ${SSTATE_PKG} 2>/dev/null || true
		return
	fi

	mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
	TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`

	# -c create, -S handle sparse files efficiently
	OPT="-cS"
	ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
	# Use pzstd if available
	if [ -x "$(command -v pzstd)" ]; then
		ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
	fi

	# Need to handle empty directories
	if [ "$(ls -A)" ]; then
		set +e
		tar -I "$ZSTD" $OPT -f $TFILE *
		ret=$?
		# tar exit code 1 means "file changed as we read it" and is tolerated;
		# anything else (other than success) is a real failure.
		if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
			exit 1
		fi
		set -e
	else
		# Create a valid empty archive when there is nothing to pack.
		tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
	fi
	chmod 0664 $TFILE
	# Skip if it was already created by some other process
	if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
		# There is a symbolic link, but it links to nothing.
		# Forcefully replace it with the new file.
		ln -f $TFILE ${SSTATE_PKG} || true
	elif [ ! -e ${SSTATE_PKG} ]; then
		# Move into place using ln to attempt an atomic op.
		# Abort if it already exists
		ln $TFILE ${SSTATE_PKG} || true
	else
		touch ${SSTATE_PKG} 2>/dev/null || true
	fi
	rm $TFILE
}
862
# Detach-sign the generated sstate package with the local GPG signer,
# replacing any stale .sig file from a previous packaging attempt.
python sstate_sign_package () {
    from oe.gpg_sign import get_signer


    signer = get_signer(d, 'local')
    sstate_pkg = d.getVar('SSTATE_PKG')
    if os.path.exists(sstate_pkg + '.sig'):
        os.unlink(sstate_pkg + '.sig')
    # armor=False: produce a binary signature, not ASCII-armored output.
    signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
                       d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}
874
# Report the task's unihash to the signature generator, if the active
# siggen implementation supports it (hashequivalence servers do).
python sstate_report_unihash() {
    report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)

    if report_unihash:
        ss = sstate_state_fromvars(d)
        report_unihash(os.getcwd(), ss['task'], d)
}
882
#
# Shell function to decompress and prepare a package for installation
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
	ZSTD="zstd -T${ZSTD_THREADS}"
	# Use pzstd if available
	if [ -x "$(command -v pzstd)" ]; then
		ZSTD="pzstd -p ${ZSTD_THREADS}"
	fi

	tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
	# update .siginfo atime on local/NFS mirror if it is a symbolic link
	[ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
	# update each symbolic link instead of any referenced file
	touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
	[ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
	[ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
902
# Tell bitbake which function answers "which setscene tasks can be
# satisfied from existing sstate objects?".
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    """Return the set of task ids whose sstate objects are available.

    Checks the local SSTATE_DIR first, then (if SSTATE_MIRRORS is set)
    probes the mirrors in parallel with the fetcher's checkstatus.
    sq_data supplies per-task 'hash', 'unihash' and 'hashfn' mappings.
    """
    import itertools

    found = set()
    missed = set()

    def gethash(task):
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        # These early tasks use the software-specific spec (third field)
        # with no extra path component when one is provided.
        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    def getsstatefile(tid, siginfo, d):
        spec, extrapath, tname = getpathcomponents(tid, d)
        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)

    # Pass 1: check the local sstate directory.
    for tid in sq_data['hash']:

        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))

        if os.path.exists(sstatefile):
            found.add(tid)
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    foundLocal = len(found)
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init():
            while not connection_cache_pool.full():
                connection_cache_pool.put(FetchConnectionCache())

        def checkstatus_end():
            while not connection_cache_pool.empty():
                connection_cache = connection_cache_pool.get()
                connection_cache.close_connections()

        # Worker run in the thread pool below; relies on the enclosing
        # scope's connection_cache_pool, progress, msg and cnt_tasks_done,
        # all of which are bound before the pool starts executing.
        def checkstatus(arg):
            (tid, sstatefile) = arg

            connection_cache = connection_cache_pool.get()
            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            import traceback

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                        connection_cache=connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                found.add(tid)
                missed.remove(tid)
            except bb.fetch2.FetchError as e:
                # A miss on the mirror is expected and only debug-logged.
                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
            except Exception as e:
                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))

            connection_cache_pool.put(connection_cache)

            if progress:
                bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
            bb.event.check_for_interrupts(d)

        # Pass 2: probe the mirrors for everything still missing locally.
        tasklist = []
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))

            ## thread-safe counter
            cnt_tasks_done = itertools.count(start = 1)
            # Only show a progress bar for larger mirror scans.
            progress = len(tasklist) >= 100
            if progress:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            # Have to setup the fetcher environment here rather than in each thread as it would race
            fetcherenv = bb.fetch2.get_fetcher_environment(d)
            with bb.utils.environment(**fetcherenv):
                bb.event.enable_threadlock()
                import concurrent.futures
                from queue import Queue
                connection_cache_pool = Queue(nproc)
                checkstatus_init()
                with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
                    executor.map(checkstatus, tasklist.copy())
                checkstatus_end()
                bb.event.disable_threadlock()

            if progress:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    # Emit detailed hit/miss data for the toaster web UI, if enabled.
    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []};
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        for tid in found:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
                 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
# SSTATE_EXCLUDEDEPS_SYSROOT changes only run-time pruning decisions,
# not task output, so keep it out of the function's signature.
setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"

# Tell bitbake which function decides whether a setscene dependency is
# really required (see setscene_depvalid below).
BB_SETSCENE_DEPVALID = "setscene_depvalid"
1067
def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too
    # Return - False - We need this dependency
    #        - True - We can skip this dependency
    #
    # The function walks every dependee and applies OE-specific pruning
    # rules; the first dependee that cannot be excluded forces the
    # dependency to be kept (return False).
    import re

    def logit(msg, log):
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    # Tasks which only ever matter as *direct* dependencies.
    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]

    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # We only need to trigger deploy_source_date_epoch through direct dependencies
    if taskdependees[task][1] in directtasks:
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    for dep in taskdependees:
        logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package doesn't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # This is due to the [depends] in useradd.bbclass complicating matters
        # The logic *is* reversed here due to the way hard setscene dependencies are injected
        if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[dep][1] in directtasks:
            continue

        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True
1168
# After every successful task, write (or refresh the timestamp of) the
# .siginfo signature data next to the corresponding sstate object.
addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        taskname = d.getVar("BB_RUNTASK")[3:]
        spec = d.getVar('SSTATE_PKGSPEC')
        swspec = d.getVar('SSTATE_SWSPEC')
        # Early source tasks use the software-only spec (SSTATE_SWSPEC has
        # no package-arch component) and drop the extra path prefix.
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            # Already present: just refresh its timestamp; tolerate
            # unwritable shared mirrors.
            try:
                os.utime(siginfo, None)
            except PermissionError:
                pass
            except OSError as e:
                # Handle read-only file systems gracefully
                import errno
                if e.errno != errno.EROFS:
                    raise e

}
1197
# When "1", also delete the workdir of recipes pruned by the handler below.
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"

#
# Event handler which removes manifests and stamps file for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed so recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit where recipes
# became skipped would be another example.
#
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler_reachablestamps
sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler_reachablestamps() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    # A preserve-stamps file lists stamps that must never be pruned here.
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        manseen = set()
        ignore = []
        # Each index line is "<stamp> <manifest> <workdir>".
        with open(i, "r") as f:
            lines = f.readlines()
            for l in reversed(lines):
                try:
                    (stamp, manifest, workdir) = l.split()
                    # The index may have multiple entries for the same manifest as the code above only appends
                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
                    # The last entry in the list is the valid one, any earlier entries with matching manifests
                    # should be ignored.
                    if manifest in manseen:
                        ignore.append(l)
                        continue
                    manseen.add(manifest)
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    # .postrm files must survive for package removal hooks.
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts(d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        # Rewrite the index without the removed/duplicate entries.
        with open(i, "w") as f:
            for l in lines:
                if l in ignore:
                    continue
                f.write(l)
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}
1294
1295
#
# Bitbake can generate an event showing which setscene tasks are 'stale',
# i.e. which ones will be rerun. These are ones where a stamp file is present but
# it is stable (e.g. taskhash doesn't match). With that list we can go through
# the manifests for matching tasks and "uninstall" those manifests now. We do
# this now rather than mid build since the distribution of files between sstate
# objects may have changed, new tasks may run first and if those new tasks overlap
# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
# removing these files is fast.
#
addhandler sstate_eventhandler_stalesstate
sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
python sstate_eventhandler_stalesstate() {
    d = e.data
    # e.tasks maps each stale task id to its stamp file names.
    tasks = e.tasks

    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    for a in list(set(d.getVar("SSTATE_ARCHS").split())):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        # Each index line is "<stamp> <manifest> <workdir>"; match stale
        # task stamps against it to locate their per-task manifests.
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                try:
                    (stamp, manifest, workdir) = l.split()
                    for tid in tasks:
                        for s in tasks[tid]:
                            if s.startswith(stamp):
                                taskname = bb.runqueue.taskname_from_tid(tid)[3:]
                                manname = manifest + "." + taskname
                                if os.path.exists(manname):
                                    bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
                                    toremove.append((manname, tid, tasks[tid]))
                                    break
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for (manname, tid, stamps) in toremove:
                sstate_clean_manifest(manname, d)
                for stamp in stamps:
                    bb.utils.remove(stamp)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts(d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)
}