blob: efe7f697752072afb5fcae02d7f44031213c9869 [file] [log] [blame]
Patrick Williams92b42cb2022-09-03 06:53:57 -05001#
2# Copyright OpenEmbedded Contributors
3#
4# SPDX-License-Identifier: MIT
5#
6
# Version string embedded in SSTATE_PKGSPEC/SSTATE_SWSPEC; changing it makes
# previously generated archives non-matching.
SSTATE_VERSION = "11"

# zstd compression level for sstate archives (weak default, user-overridable).
SSTATE_ZSTD_CLEVEL ??= "8"

# Location of the per-machine manifest files recording what each task staged.
SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"
13
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    """Generate the relative path of an sstate archive below SSTATE_DIR.

    Arguments:
        spec: colon-separated package spec (SSTATE_PKGSPEC or SSTATE_SWSPEC).
        hash: task (uni)hash; falsy values are replaced with "INVALID".
        taskname: task the archive belongs to; None yields "".
        siginfo: True when naming the ".siginfo" companion file.
        d: datastore (currently unused, kept for API compatibility).

    Returns "xx/yy/<spec><hash>_<taskname><ext>" where xx/yy are the first
    four hash characters, used to spread archives across subdirectories.
    Overlong names are shortened by truncating the informational spec
    fields; bb.fatal() is raised if the name still does not fit.
    """
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            # Fixed typo in the error message ("chararacters" -> "characters").
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn
40
# Architecture component of sstate paths/manifests; overridden for
# native/cross/crosssdk/etc. classes in the anonymous python fragment below.
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
# Colon-separated archive name prefix; consumed by generate_sstatefn(), which
# may truncate the informational fields 2,3,4 to keep names under the limit.
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
# Architecture-independent spec (no arch fields); swapped in for populate_lic
# by sstate_state_fromvars().
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
# Glob used when removing cached archives for a task (sstate_clean_cachefile).
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"

# explicitly make PV to depend on evaluated value of PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""
57
# Paths under which multiple recipes may legitimately stage the same files;
# overlaps below these prefixes are not fatal in sstate_install().
# Avoid docbook/sgml catalog warnings for now
SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps that allarch is disabled when multilib is used
SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"

# File patterns that may embed hardcoded build paths and are rewritten by
# sstate_hardcode_path() / sstate_hardcode_path_unpack().
SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
# task:file-glob:path-or-regex entries used by the hash equivalence code to
# filter path differences out of output hashes.
SSTATE_HASHEQUIV_FILEMAP ?= " \
    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
    populate_sysroot:*/crossscripts/*:${TMPDIR} \
    populate_sysroot:*/crossscripts/*:${COREBASE} \
    "
83
# First field flips to "True" for native/cross/crosssdk in the anonymous
# python fragment below (extra path component carries the distro string).
BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

SSTATE_ARCHS_TUNEPKG ??= "${TUNE_PKGARCH}"
# All architecture components an sstate archive directory may use.
SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_ARCH}-${SDKPKGSUFFIX} \
    allarch \
    ${SSTATE_ARCHS_TUNEPKG} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

# Hook lists run around archive creation/unpack/install; each hook is executed
# with cwd set to the sstate build or install directory (see exec_func calls).
SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
# Additional variables whose paths get the FIXME_<var> placeholder treatment.
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnUPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
# List of signatures to consider valid.
SSTATE_VALID_SIGS ??= ""
SSTATE_VALID_SIGS[vardepvalue] = ""

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "
138
python () {
    # Pick the sstate architecture for the recipe's class (native, cross,
    # SDK variants, allarch) and wire the sstate pre/post functions onto
    # every task listed in SSTATETASKS.
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        # Target recipes: keep SSTATE_PKGARCH but record the machine-specific
        # PACKAGE_ARCH in the manifest machine name.
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    # Host-dependent classes get the distro string as an extra path component
    # so archives built on different host distros do not collide.
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    # Deduplicate SSTATETASKS and attach the sstate machinery to each task.
    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}
168
def sstate_init(task, d):
    """Return a fresh, empty shared-state descriptor for *task*.

    The descriptor is later filled in by sstate_state_fromvars() with the
    directory pairs, lock files and related metadata for the task.
    """
    return {
        'task': task,
        'dirs': [],
        'plaindirs': [],
        'lockfiles': [],
        'lockfiles-shared': [],
    }
177
def sstate_state_fromvars(d, task = None):
    """Build the shared-state descriptor for *task* from its varflags.

    Reads the sstate-* varflags of do_<task> (input/output dirs, plain dirs,
    lock files, intercept functions, fixme dir) and returns the dict started
    by sstate_init() with those fields filled in.  Falls back to
    BB_CURRENTTASK when no task is given; dies without task context.
    """
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    # Input and output dirs must pair up one-to-one.
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        # Switch to SSTATE_SWSPEC, which carries no architecture fields, and
        # drop the host-specific extra path components.
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    ss['fixmedir'] = fixmedir
    return ss
211
def sstate_add(ss, source, dest, d):
    """Register a source -> dest directory pair on the descriptor *ss*.

    Both paths are normalised; the entry appended to ss['dirs'] is
    [basename, source, dest].  Returns *ss* for convenience.
    """
    # Ensure a trailing slash before normpath so both inputs are treated
    # uniformly, then collapse it away again via normpath.
    src = os.path.normpath(source if source.endswith("/") else source + "/")
    dst = os.path.normpath(dest if dest.endswith("/") else dest + "/")
    ss['dirs'].append([os.path.basename(src), src, dst])
    return ss
222
def sstate_install(ss, d):
    """Install staged output into the shared area and write its manifest.

    Walks the staged trees in ss['dirs'], checks the resulting file list for
    conflicts with files other tasks already installed (honouring
    SSTATE_ALLOW_OVERLAP_FILES), writes the manifest (files first, then
    directories), records the manifest in the per-arch index, and finally
    hardlink-copies the trees into place and runs SSTATEPOSTINSTFUNCS.
    All of this happens under the task's lock files.
    """
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    # A readable manifest means this task's output was already installed.
    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    # Collect the destination paths of everything we are about to install.
    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                # Symlinked directories are tracked as files so they are
                # removed (not recursed into) on clean.
                if os.path.islink(srcdir):
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                # Grep the manifests to name the task that already owns f.
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=index-* | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append(" (not matched to any task)")
    if match:
        bb.fatal("Recipe %s is trying to install files into a shared " \
           "area when those files already exist. The files and the manifests listing " \
           "them are:\n %s\n"
           "Please adjust the recipes so only one recipe provides a given file. " % \
           (d.getVar('PN'), "\n ".join(match)))

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries, we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)

sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"
341
def sstate_installpkg(ss, d):
    """Fetch, verify and unpack the sstate archive for the task in *ss*.

    Returns False (no acceleration possible) when the archive cannot be
    obtained or its signature cannot be verified; otherwise unpacks it into
    SSTATE_INSTDIR, runs SSTATEPREINSTFUNCS + sstate_unpack_package, and
    hands off to sstate_installpkgdir().
    """
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')
    verify_sig = bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False)

    # Fetch from a mirror if the archive (or its required .sig) is missing.
    if not os.path.exists(sstatepkg) or (verify_sig and not os.path.exists(sstatepkg + '.sig')):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if verify_sig:
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty sstateinst directory, ensure its clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)
384
def sstate_installpkgdir(ss, d):
    """Move unpacked sstate content from SSTATE_INSTDIR into its final place.

    Runs SSTATEPOSTUNPACKFUNCS, renames each staged tree to its input
    location, then calls sstate_install() to populate the shared area, and
    finally restores the plain (non-shared) directories.  Returns True.
    """
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        # Plain dirs were archived relative to WORKDIR (or work-shared).
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    return True
420
python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Replaces the FIXMESTAGINGDIR*/FIXME_* placeholders (written at archive
    # creation time) in the files listed in "fixmepath" with this build's
    # real sysroot paths.
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        # Which placeholders exist depends on the class the archive was
        # created for; mirror the choice made in sstate_hardcode_path().
        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}
469
def sstate_clean_cachefile(ss, d):
    """Delete the cached sstate archives for the task described by *ss*.

    Uses the SSTATE_PATHSPEC glob (parameterised by SSTATE_PATH_CURRTASK)
    to match every archive variant for the task.  Does nothing when the
    task is not actually defined for this recipe.
    """
    import oe.path

    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)
478
def sstate_clean_cachefiles(d):
    """Remove cached sstate archives for every task listed in SSTATETASKS."""
    tasks = (d.getVar('SSTATETASKS') or "").split()
    for task in tasks:
        # Each task gets its own datastore copy since the descriptor setup
        # mutates task-related variables.
        localdata = d.createCopy()
        shared_state = sstate_state_fromvars(localdata, task)
        sstate_clean_cachefile(shared_state, localdata)
484
def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    """Remove everything listed in *manifest*, then the manifest itself.

    Directory entries end in "/" and are only rmdir'ed when empty and when
    canrace is False; with canrace=True directory removal is skipped because
    another task may be populating the directory concurrently.  A sibling
    "<manifest>.postrm" script, if present, is executed and removed.
    *prefix* is prepended to relative manifest entries.
    """
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                # Symlinked dirs were recorded with a trailing slash too.
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)
520
def sstate_clean(ss, d):
    """Undo a previous installation of this task's shared-state output.

    Removes the files recorded in the task's manifest (under the task's
    locks) and deletes the task's stamps, preserving sigdata/sigbasedata
    and taint files.
    """
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        # Tasks with stamp-extra-info keep per-machine manifests/stamps.
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        # canrace=True: other tasks may be repopulating these directories.
        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"
569
CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    # do_clean hook: remove the installed shared-state output for every
    # sstate task of this recipe.
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        # Work on a datastore copy; descriptor setup mutates variables.
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}
585
python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Scans the files matched by SSTATE_SCAN_CMD for build-specific sysroot
    # paths, records the affected files in "fixmepath" (relative paths) and
    # replaces the paths with FIXMESTAGINGDIR*/FIXME_* placeholders.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        # BSD xargs has no --no-run-if-empty (it is the default behaviour).
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}
641
def sstate_package(ss, d):
    """Assemble the task's output into SSTATE_BUILDDIR and create the archive.

    Moves the staged trees and plain dirs into a per-task build directory,
    rejects absolute symlinks pointing into TMPDIR, then runs the archive
    creation hook chain (SSTATECREATEFUNCS, sstate_report_unihash,
    sstate_create_pkgdirs, sstate_create_package, optional signing,
    SSTATEPOSTCREATEFUNCS) and writes/refreshes the .siginfo file.
    For do_package, file timestamps are clamped to SOURCE_DATE_EPOCH.
    """
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    fixtime = False
    if ss['task'] == "package":
        fixtime = True

    def fixtimestamp(root, path):
        # Clamp mtimes newer than SOURCE_DATE_EPOCH for reproducibility.
        f = os.path.join(root, path)
        if os.lstat(f).st_mtime > sde:
            os.utime(f, (sde, sde), follow_symlinks=False)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but its not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                if fixtime:
                    fixtimestamp(walkroot, file)
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        # Archive plain dirs relative to WORKDIR (or work-shared).
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)
        if fixtime:
            fixtimestamp(pdir, "")
            for walkroot, dirs, files in os.walk(pdir):
                for file in files + dirs:
                    fixtimestamp(walkroot, file)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_pkgdirs', 'sstate_create_package']
    if d.getVar('SSTATE_SIG_KEY'):
        sstate_create_package.append('sstate_sign_package')

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        # Touch the existing siginfo so mirrors see it as fresh.
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return

sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"
734
def pstaging_fetch(sstatefetch, d):
    """Try to download an sstate archive (plus .siginfo/.sig) from mirrors.

    Fetches directly into SSTATE_DIR using a datastore copy with DL_DIR,
    FILESPATH and PREMIRRORS redirected.  Fetch failures are silently
    ignored: the caller falls back to building the task for real.
    """
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.delVar('SRC_URI')
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass
777
def sstate_setscene(d):
    """Setscene entry point: install from sstate or signal a full rebuild.

    Raises BBHandledException when no usable archive is available so the
    real task runs instead.
    """
    shared_state = sstate_state_fromvars(d)
    accelerate = sstate_installpkg(shared_state, d)
    if not accelerate:
        msg = "No sstate archive obtainable, will run full task instead."
        bb.warn(msg)
        raise bb.BBHandledException(msg)
785
python sstate_task_prefunc () {
    # Runs before every sstate task: remove any previously installed
    # shared-state output for the task so it can be repopulated cleanly.
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"
791
python sstate_task_postfunc () {
    # Runs after every sstate task: package the task output into an sstate
    # archive and then install it into the shared area.
    shared_state = sstate_state_fromvars(d)

    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))

    # Archives must be group-writable/readable; warn if the umask differs.
    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"
812
python sstate_create_pkgdirs () {
    # report_unihash can change SSTATE_PKG and mkdir -p in shell doesn't own
    # intermediate directories correctly, so create them from python instead.
    pkgdir = os.path.dirname(d.getVar('SSTATE_PKG'))
    with bb.utils.umask(0o002):
        bb.utils.mkdirhier(pkgdir)
}
Patrick Williams92b42cb2022-09-03 06:53:57 -0500819
#
# Shell function to generate a sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
    # Exit early if it already exists; refresh its timestamp so mirror
    # cleanup based on atime/mtime keeps the object around.
    if [ -e ${SSTATE_PKG} ]; then
        touch ${SSTATE_PKG} 2>/dev/null || true
        return
    fi

    # Build the archive under a unique temporary name so a partially
    # written file can never be mistaken for a valid sstate package.
    TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`

    OPT="-cS"
    ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
    # Use pzstd if available
    if [ -x "$(command -v pzstd)" ]; then
        ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
    fi

    # Need to handle empty directories
    if [ "$(ls -A)" ]; then
        set +e
        tar -I "$ZSTD" $OPT -f $TFILE *
        ret=$?
        # GNU tar exits 1 when "some files differ" (changed while being
        # read); tolerate that, fail on any other non-zero status.
        if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
            exit 1
        fi
        set -e
    else
        # Empty directory: create a valid but contentless archive.
        tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
    fi
    chmod 0664 $TFILE
    # Skip if it was already created by some other process
    if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
        # There is a symbolic link, but it links to nothing.
        # Forcefully replace it with the new file.
        ln -f $TFILE ${SSTATE_PKG} || true
    elif [ ! -e ${SSTATE_PKG} ]; then
        # Move into place using ln to attempt an atomic op.
        # Abort if it already exists
        ln $TFILE ${SSTATE_PKG} || true
    else
        # Raced with another writer; keep theirs, just refresh timestamp.
        touch ${SSTATE_PKG} 2>/dev/null || true
    fi
    rm $TFILE
}
867
python sstate_sign_package () {
    # Detach-sign the freshly created sstate package with the configured
    # local GPG key, replacing any stale signature first.
    from oe.gpg_sign import get_signer

    pkg = d.getVar('SSTATE_PKG')
    sigfile = pkg + '.sig'
    if os.path.exists(sigfile):
        os.unlink(sigfile)

    signer = get_signer(d, 'local')
    signer.detach_sign(pkg, d.getVar('SSTATE_SIG_KEY', False), None,
                       d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}
879
python sstate_report_unihash() {
    # Only newer signature generators implement report_unihash; do nothing
    # when the attribute is absent.
    reporter = getattr(bb.parse.siggen, 'report_unihash', None)
    if reporter:
        reporter(os.getcwd(), sstate_state_fromvars(d)['task'], d)
}
887
#
# Shell function to decompress and prepare a package for installation
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
    ZSTD="zstd -T${ZSTD_THREADS}"
    # Use pzstd if available
    if [ -x "$(command -v pzstd)" ]; then
        ZSTD="pzstd -p ${ZSTD_THREADS}"
    fi

    # Extract preserving permissions; -v lists files into the task log.
    tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
    # update .siginfo atime on local/NFS mirror if it is a symbolic link
    [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
    # update each symbolic link instead of any referenced file
    # (keeps locally cached objects "fresh" for age-based mirror cleanup
    # without dirtying the shared mirror files themselves)
    touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
    [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
    [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}
907
BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    """
    Determine which tasks in sq_data have an sstate object available,
    checking the local SSTATE_DIR first and then any SSTATE_MIRRORS
    (probed in parallel). Returns the set of task ids that were found.
    """
    import itertools

    found = set()
    missed = set()

    def gethash(task):
        # Unihash for a task id, as supplied by the runqueue.
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        # "Software-only" tasks use the machine-independent naming (SWSPEC).
        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    def getsstatefile(tid, siginfo, d):
        # Relative path of the sstate object (or .siginfo) for a task id.
        spec, extrapath, tname = getpathcomponents(tid, d)
        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)

    for tid in sq_data['hash']:

        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))

        if os.path.exists(sstatefile):
            # Refresh timestamps so age-based cache cleanup keeps used objects.
            oe.utils.touch(sstatefile)
            found.add(tid)
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    foundLocal = len(found)
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init():
            # Pre-fill the pool so every worker thread can take a cache.
            while not connection_cache_pool.full():
                connection_cache_pool.put(FetchConnectionCache())

        def checkstatus_end():
            while not connection_cache_pool.empty():
                connection_cache = connection_cache_pool.get()
                connection_cache.close_connections()

        def checkstatus(arg):
            # Worker: probe one mirror URI; mutates found/missed (sets are
            # protected by the GIL for add/remove of hashable items).
            (tid, sstatefile) = arg

            connection_cache = connection_cache_pool.get()
            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            import traceback

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                        connection_cache=connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                found.add(tid)
                missed.remove(tid)
            except bb.fetch2.FetchError as e:
                # Expected for objects absent from the mirror; stay in missed.
                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
            except Exception as e:
                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))

            connection_cache_pool.put(connection_cache)

            if progress:
                bb.event.fire(bb.event.ProcessProgress(msg, next(cnt_tasks_done)), d)
            bb.event.check_for_interrupts(d)

        tasklist = []
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))

            ## thread-safe counter
            cnt_tasks_done = itertools.count(start = 1)
            # Only show progress for large checks to avoid event spam.
            progress = len(tasklist) >= 100
            if progress:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            # Have to setup the fetcher environment here rather than in each thread as it would race
            fetcherenv = bb.fetch2.get_fetcher_environment(d)
            with bb.utils.environment(**fetcherenv):
                bb.event.enable_threadlock()
                import concurrent.futures
                from queue import Queue
                connection_cache_pool = Queue(nproc)
                checkstatus_init()
                with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
                    executor.map(checkstatus, tasklist.copy())
                checkstatus_end()
                bb.event.disable_threadlock()

            if progress:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        # Report per-task hit/miss data so the Toaster UI can show sstate use.
        evdata = {'missed': [], 'found': []};
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        for tid in found:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
            (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"

BB_SETSCENE_DEPVALID = "setscene_depvalid"

def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too
    # Return - False - We need this dependency
    #        - True - We can skip this dependency
    # NOTE: the order of the checks below is significant; earlier rules
    # deliberately shadow later ones.
    import re

    def logit(msg, log):
        # Append to the caller-supplied list when provided (used to collect an
        # explanation trail), otherwise emit at debug level 2.
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    # Tasks which only ever need to be triggered by a direct dependency.
    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx", "do_deploy_archives"]

    def isNativeCross(x):
        # Recipe name heuristic for native/cross/crosssdk recipes.
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # We only need to trigger deploy_source_date_epoch through direct dependencies
    if taskdependees[task][1] in directtasks:
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    for dep in taskdependees:
        logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package doesn't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[dep][1] in directtasks:
            continue

        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True
1169
addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    # After every successful task, ensure a .siginfo file exists for the
    # task in the sstate directory. Tasks which produced an sstate package
    # (SSTATE_CURRTASK already set) handled this during packaging.
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        taskname = d.getVar("BB_RUNTASK")[3:]
        swspec = d.getVar('SSTATE_SWSPEC')
        # "Software-only" tasks use the machine-independent SWSPEC naming.
        # (Removed dead local: SSTATE_PKGSPEC was read into an unused variable.)
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            # Keep the existing siginfo fresh for age-based cache cleanup.
            oe.utils.touch(siginfo)
}
1189
SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"

#
# Event handler which removes manifests and stamps file for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed so recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit where recipes
# became skipped would be another example.
#
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler_reachablestamps
sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler_reachablestamps() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            # NOTE(review): readlines() keeps trailing newlines, while the
            # stamp values compared below come from split() and have none —
            # verify the preserve-stamps file format matches this comparison.
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        manseen = set()
        ignore = []
        with open(i, "r") as f:
            lines = f.readlines()
            # Walk newest-first so the latest entry per manifest wins.
            for l in reversed(lines):
                try:
                    (stamp, manifest, workdir) = l.split()
                    # The index may have multiple entries for the same manifest as the code above only appends
                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
                    # The last entry in the list is the valid one, any earlier entries with matching manifests
                    # should be ignored.
                    if manifest in manseen:
                        ignore.append(l)
                        continue
                    manseen.add(manifest)
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    # .postrm scripts must survive the manifest cleanup.
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts(d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        # Rewrite the index without the removed and duplicate entries.
        with open(i, "w") as f:
            for l in lines:
                if l in ignore:
                    continue
                f.write(l)
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}
1286
1287
#
# Bitbake can generate an event showing which setscene tasks are 'stale',
# i.e. which ones will be rerun. These are ones where a stamp file is present but
# it is stale (e.g. taskhash doesn't match). With that list we can go through
# the manifests for matching tasks and "uninstall" those manifests now. We do
# this now rather than mid build since the distribution of files between sstate
# objects may have changed, new tasks may run first and if those new tasks overlap
# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
# removing these files is fast.
#
addhandler sstate_eventhandler_stalesstate
sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
python sstate_eventhandler_stalesstate() {
    d = e.data
    # Mapping of task id -> stamp file names, supplied by the event.
    tasks = e.tasks

    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    for a in list(set(d.getVar("SSTATE_ARCHS").split())):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                try:
                    (stamp, manifest, workdir) = l.split()
                    # Match stale task stamps against this index entry's stamp.
                    for tid in tasks:
                        for s in tasks[tid]:
                            if s.startswith(stamp):
                                taskname = bb.runqueue.taskname_from_tid(tid)[3:]
                                manname = manifest + "." + taskname
                                if os.path.exists(manname):
                                    bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
                                    toremove.append((manname, tid, tasks[tid]))
                                    break
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for (manname, tid, stamps) in toremove:
                sstate_clean_manifest(manname, d)
                for stamp in stamps:
                    bb.utils.remove(stamp)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)
                bb.event.check_for_interrupts(d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)
}