#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

SSTATE_VERSION = "10"

SSTATE_ZSTD_CLEVEL ??= "8"

SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"

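# Construct the path of an sstate object from the package spec, task hash and
# task name, shortening over-long specs and prefixing the result with two
# levels of hash-derived subdirectories so objects are spread across SSTATE_DIR.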
def generate_sstatefn(spec, hash, taskname, siginfo, d):
    if taskname is None:
        return ""
    extension = ".tar.zst"
    # 8 chars reserved for siginfo
    limit = 254 - 8
    if siginfo:
        limit = 254
        extension = ".tar.zst.siginfo"
    if not hash:
        hash = "INVALID"
    fn = spec + hash + "_" + taskname + extension
    # If the filename is too long, attempt to reduce it
    if len(fn) > limit:
        components = spec.split(":")
        # Fields 0,5,6 are mandatory, 1 is most useful, 2,3,4 are just for information
        # 7 is for the separators
        avail = (limit - len(hash + "_" + taskname + extension) - len(components[0]) - len(components[1]) - len(components[5]) - len(components[6]) - 7) // 3
        components[2] = components[2][:avail]
        components[3] = components[3][:avail]
        components[4] = components[4][:avail]
        spec = ":".join(components)
        fn = spec + hash + "_" + taskname + extension
        if len(fn) > limit:
            bb.fatal("Unable to reduce sstate name to less than 255 characters")
    return hash[:2] + "/" + hash[2:4] + "/" + fn

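# The colon-separated fields of SSTATE_PKGSPEC (indices 0-6 as used by
# generate_sstatefn above) are: "sstate", PN, ${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS},
# PV, PR, SSTATE_PKGARCH and SSTATE_VERSION.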
SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${PN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC'), d.getVar('BB_UNIHASH'), d.getVar('SSTATE_CURRTASK'), False, d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/*/${SSTATE_PKGSPEC}*_${SSTATE_PATH_CURRTASK}.tar.zst*"

# Explicitly make PV depend on the evaluated value of the PV variable
PV[vardepvalue] = "${PV}"

# We don't want the sstate to depend on things like the distro string
# of the system; we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""
SSTATE_EXTRAPATHWILDCARD[vardepvalue] = ""

# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
SSTATE_ALLOW_OVERLAP_FILES = "${DEPLOY_DIR}/licenses/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_ALLOW_OVERLAP_FILES += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# sdk-provides-dummy-nativesdk and nativesdk-buildtools-perl-dummy overlap for different SDKMACHINE
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/sdk_provides_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-nativesdk/"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_RPM}/buildtools_dummy_nativesdk/ ${DEPLOY_DIR_IPK}/buildtools-dummy-nativesdk/"
# target-sdk-provides-dummy overlaps because allarch is disabled when multilib is used
SSTATE_ALLOW_OVERLAP_FILES += "${COMPONENTS_DIR}/sdk-provides-dummy-target/ ${DEPLOY_DIR_RPM}/sdk_provides_dummy_target/ ${DEPLOY_DIR_IPK}/sdk-provides-dummy-target/"
# Archive the sources for many architectures in one deploy folder
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_SRC}"
# ovmf/grub-efi/systemd-boot/intel-microcode multilib recipes can generate identical overlapping files
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/ovmf"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/grub-efi"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/systemd-boot"
SSTATE_ALLOW_OVERLAP_FILES += "${DEPLOY_DIR_IMAGE}/microcode"

SSTATE_SCAN_FILES ?= "*.la *-config *_config postinst-*"
SSTATE_SCAN_CMD ??= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES").split())}" \) -type f'
SSTATE_SCAN_CMD_NATIVE ??= 'grep -Irl -e ${RECIPE_SYSROOT} -e ${RECIPE_SYSROOT_NATIVE} -e ${HOSTTOOLS_DIR} ${SSTATE_BUILDDIR}'
SSTATE_HASHEQUIV_FILEMAP ?= " \
    populate_sysroot:*/postinst-useradd-*:${TMPDIR} \
    populate_sysroot:*/postinst-useradd-*:${COREBASE} \
    populate_sysroot:*/postinst-useradd-*:regex-\s(PATH|PSEUDO_IGNORE_PATHS|HOME|LOGNAME|OMP_NUM_THREADS|USER)=.*\s \
    populate_sysroot:*/crossscripts/*:${TMPDIR} \
    populate_sysroot:*/crossscripts/*:${COREBASE} \
    "

BB_HASHFILENAME = "False ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${ORIGNATIVELSBSTRING} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${PACKAGE_ARCH} \
    allarch \
    ${PACKAGE_ARCH} \
    ${PACKAGE_EXTRA_ARCHS} \
    ${MACHINE_ARCH}"
SSTATE_ARCHS[vardepsexclude] = "ORIGNATIVELSBSTRING"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

SSTATECREATEFUNCS += "sstate_hardcode_path"
SSTATECREATEFUNCS[vardeps] = "SSTATE_SCAN_FILES"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
EXTRA_STAGING_FIXMES ?= "HOSTTOOLS_DIR"

# Check whether sstate exists for tasks that support sstate and are in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_SSTATE_EXISTS_CHECK ?= 'error'

# Check whether the task's computed hash matches the task's hash in the
# locked signatures file.
SIGGEN_LOCKEDSIGS_TASKSIG_CHECK ?= "error"

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
# List of signatures to consider valid.
SSTATE_VALID_SIGS ??= ""
SSTATE_VALID_SIGS[vardepvalue] = ""

SSTATE_HASHEQUIV_METHOD ?= "oe.sstatesig.OEOuthashBasic"
SSTATE_HASHEQUIV_METHOD[doc] = "The fully-qualified function used to calculate \
    the output hash for a task, which in turn is used to determine equivalency. \
    "

SSTATE_HASHEQUIV_REPORT_TASKDATA ?= "0"
SSTATE_HASHEQUIV_REPORT_TASKDATA[doc] = "Report additional useful data to the \
    hash equivalency server, such as PN, PV, taskname, etc. This information \
    is very useful for developers looking at task data, but may leak sensitive \
    data if the equivalence server is public. \
    "

python () {
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH") == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('BB_HASHFILENAME', "True ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "${NATIVELSBSTRING}/")

    unique_tasks = sorted(set((d.getVar('SSTATETASKS') or "").split()))
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
        d.setVarFlag(task, 'network', '1')
        d.setVarFlag(task + "_setscene", 'network', '1')
}

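# Create the empty per-task state dictionary used to track the task name,
# staged directories and lock files for an sstate task.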
def sstate_init(task, d):
    ss = {}
    ss['task'] = task
    ss['dirs'] = []
    ss['plaindirs'] = []
    ss['lockfiles'] = []
    ss['lockfiles-shared'] = []
    return ss

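# Populate the state dictionary for a task from its sstate-* varflags
# (sstate-inputdirs, sstate-outputdirs, sstate-plaindirs, lock files,
# intercept functions and fixmedir).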
def sstate_state_fromvars(d, task = None):
    if task is None:
        task = d.getVar('BB_CURRENTTASK')
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs') or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs') or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs') or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile') or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared') or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs') or "").split()
    fixmedir = d.getVarFlag("do_" + task, 'sstate-fixmedir') or ""
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    ss['fixmedir'] = fixmedir
    return ss

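# Record a source/destination directory pair in the state dictionary.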
def sstate_add(ss, source, dest, d):
    if not source.endswith("/"):
        source = source + "/"
    if not dest.endswith("/"):
        dest = dest + "/"
    source = os.path.normpath(source)
    dest = os.path.normpath(dest)
    srcbase = os.path.basename(source)
    ss['dirs'].append([srcbase, source, dest])
    return ss

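# Stage the files recorded in the state dictionary into the shared areas,
# checking for conflicts with files already installed by other recipes and
# writing a manifest of everything that was installed.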
def sstate_install(ss, d):
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    d.setVar("SSTATE_INST_POSTRM", manifest + ".postrm")

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if os.path.islink(srcdir):
                    sharedfiles.append(dstdir)
                    continue
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    overlap_allowed = (d.getVar("SSTATE_ALLOW_OVERLAP_FILES") or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f) and not os.path.islink(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in overlap_allowed:
                w = os.path.normpath(w)
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                sstate_search_cmd = "grep -rlF '%s' %s --exclude=master.list | sed -e 's:^.*/::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output:
                    match.append(" (matched in %s)" % search_output.decode('utf-8').rstrip())
                else:
                    match.append(" (not matched to any task)")
    if match:
        bb.error("The recipe %s is trying to install files into a shared " \
                 "area when those files already exist. Those files and their manifest " \
                 "location are:\n %s\nPlease verify which recipe should provide the " \
                 "above files.\n\nThe build has stopped, as continuing in this scenario WILL " \
                 "break things - if not now, possibly in the future (we've seen builds fail " \
                 "several months later). If the system knew how to recover from this " \
                 "automatically it would, however there are several different scenarios " \
                 "which can result in this and we don't know which one this is. It may be " \
                 "you have switched providers of something like virtual/kernel (e.g. from " \
                 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
                 "clean task for both recipes and it will resolve this error. It may be " \
                 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
                 "those recipes should again resolve this error, however switching " \
                 "DISTRO_FEATURES on an existing build directory is not supported - you " \
                 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
                 "It could be the overlapping files detected are harmless in which case " \
                 "adding them to SSTATE_ALLOW_OVERLAP_FILES may be the correct solution. It could " \
                 "also be your build is including two different conflicting versions of " \
                 "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
                 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
                 "sharing the error and filelist above." % \
                 (d.getVar('PN'), "\n ".join(match)))
        bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")

    if ss['fixmedir'] and os.path.exists(ss['fixmedir'] + "/fixmepath.cmd"):
        sharedfiles.append(ss['fixmedir'] + "/fixmepath.cmd")
        sharedfiles.append(ss['fixmedir'] + "/fixmepath")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP") + " " + d2.getVar("SSTATE_MANFILEPREFIX") + " " + d.getVar("WORKDIR") + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    # We append new entries; we don't remove older entries which may have the same
    # manifest name but different versions from stamp/workdir. See below.
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)

sstate_install[vardepsexclude] += "SSTATE_ALLOW_OVERLAP_FILES SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"

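# Fetch (if necessary), verify and unpack the sstate archive for a task into
# the sstate-install work directory, then install it via sstate_installpkgdir().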
def sstate_installpkg(ss, d):
    from oe.gpg_sign import get_signer

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    d.setVar("SSTATE_CURRTASK", ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME')
    sstatepkg = d.getVar('SSTATE_PKG')

    if not os.path.exists(sstatepkg):
        pstaging_fetch(sstatefetch, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Sstate package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)

    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        if not os.path.isfile(sstatepkg + '.sig'):
            bb.warn("No signature file for sstate package %s, skipping acceleration..." % sstatepkg)
            return False
        signer = get_signer(d, 'local')
        if not signer.verify(sstatepkg + '.sig', d.getVar("SSTATE_VALID_SIGS")):
            bb.warn("Cannot verify signature on sstate package %s, skipping acceleration..." % sstatepkg)
            return False

    # Empty sstateinst directory, ensure it's clean
    if os.path.exists(sstateinst):
        oe.path.remove(sstateinst)
    bb.utils.mkdirhier(sstateinst)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPREINSTFUNCS') or '').split() + ['sstate_unpack_package']:
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    return sstate_installpkgdir(ss, d)

def sstate_installpkgdir(ss, d):
    import oe.path
    import subprocess

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', ss['fixmedir'])

    for f in (d.getVar('SSTATEPOSTUNPACKFUNCS') or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    for state in ss['dirs']:
        prepdir(state[1])
        bb.utils.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR')
        sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
        src = sstateinst + "/" + plain.replace(workdir, '')
        if sharedworkdir in plain:
            src = sstateinst + "/" + plain.replace(sharedworkdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        bb.utils.rename(src, dest)

    return True

python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR')
    sstatefixmedir = d.getVar('SSTATE_FIXMEDIR')
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging_target = d.getVar('RECIPE_SYSROOT')
        staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')

        if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)
        elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIRHOST:%s:g'" % (staging_target, staging_host)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g'" % (staging_target)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        # Defer do_populate_sysroot relocation command
        if sstatefixmedir:
            bb.utils.mkdirhier(sstatefixmedir)
            with open(sstatefixmedir + "/fixmepath.cmd", "w") as f:
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(fixmefn, sstatefixmedir + "/fixmepath")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(sstateinst, "FIXMEFINALSSTATEINST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_host, "FIXMEFINALSSTATEHOST")
                sstate_hardcode_cmd = sstate_hardcode_cmd.replace(staging_target, "FIXMEFINALSSTATETARGET")
                f.write(sstate_hardcode_cmd)
            bb.utils.copyfile(fixmefn, sstatefixmedir + "/fixmepath")
            return

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.check_call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}

def sstate_clean_cachefile(ss, d):
    import oe.path

    if d.getVarFlag('do_%s' % ss['task'], 'task'):
        d.setVar("SSTATE_PATH_CURRTASK", ss['task'])
        sstatepkgfile = d.getVar('SSTATE_PATHSPEC')
        bb.note("Removing %s" % sstatepkgfile)
        oe.path.remove(sstatepkgfile)

def sstate_clean_cachefiles(d):
    for task in (d.getVar('SSTATETASKS') or "").split():
        ld = d.createCopy()
        ss = sstate_state_fromvars(ld, task)
        sstate_clean_cachefile(ss, ld)

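# Remove every file and directory listed in a manifest, run any associated
# .postrm script and then delete the manifest itself.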
def sstate_clean_manifest(manifest, d, canrace=False, prefix=None):
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        if prefix and not entry.startswith("/"):
            entry = prefix + "/" + entry
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0 and not canrace:
                    # Removing directories whilst builds are in progress exposes a race. Only
                    # do it in contexts where it is safe to do so.
                    os.rmdir(entry[:-1])
            else:
                os.remove(entry)
        except OSError:
            pass

    postrm = manifest + ".postrm"
    if os.path.exists(manifest + ".postrm"):
        import subprocess
        os.chmod(postrm, 0o755)
        subprocess.check_call(postrm, shell=True)
        oe.path.remove(postrm)

    oe.path.remove(manifest)

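# Uninstall a task's existing manifest (and the files it lists) and remove its
# stamps, keeping sigdata and taint files.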
def sstate_clean(ss, d):
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN")
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info')
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d, canrace=True)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile or ".sigbasedata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"

CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    bb.note("Removing shared state for package %s" % d.getVar('PN'))

    manifest_dir = d.getVar('SSTATE_MANIFESTS')
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS').split()
    for name in tasks:
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}

python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging_target = d.getVar('RECIPE_SYSROOT')
    staging_host = d.getVar('RECIPE_SYSROOT_NATIVE')
    sstate_builddir = d.getVar('SSTATE_BUILDDIR')

    sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % staging_host
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
    elif bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target
    else:
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging_host)
        sstate_sed_cmd += " -e 's:%s:FIXMESTAGINGDIRTARGET:g'" % staging_target

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES') or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)
        sstate_grep_cmd += " -e '%s'" % (fixme_path)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD')
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.check_output(sstate_hardcode_cmd, shell=True, cwd=sstate_builddir)

    # If the fixmefn is empty, remove it.
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.check_output(sstate_filelist_relative_cmd, shell=True)
}

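# Move the task output into the per-task sstate-build directory, clamping file
# timestamps to SOURCE_DATE_EPOCH for do_package, then run the
# SSTATECREATEFUNCS and SSTATEPOSTCREATEFUNCS hooks to produce the sstate archive.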
def sstate_package(ss, d):
    import oe.path
    import time

    tmpdir = d.getVar('TMPDIR')

    fixtime = False
    if ss['task'] == "package":
        fixtime = True

    def fixtimestamp(root, path):
        f = os.path.join(root, path)
        if os.lstat(f).st_mtime > sde:
            os.utime(f, (sde, sde), follow_symlinks=False)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sde = int(d.getVar("SOURCE_DATE_EPOCH") or time.time())
    d.setVar("SSTATE_CURRTASK", ss['task'])
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        # Find and error for absolute symlinks. We could attempt to relocate but it's not
        # clear where the symlink is relative to in this context. We could add that markup
        # to sstate tasks but there aren't many of these so better just avoid them entirely.
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files + dirs:
                if fixtime:
                    fixtimestamp(walkroot, file)
                srcpath = os.path.join(walkroot, file)
                if not os.path.islink(srcpath):
                    continue
                link = os.readlink(srcpath)
                if not os.path.isabs(link):
                    continue
                if not link.startswith(tmpdir):
                    continue
                bb.error("sstate found an absolute path symlink %s pointing at %s. Please replace this with a relative link." % (srcpath, link))
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        bb.utils.rename(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR')
    sharedworkdir = os.path.join(d.getVar('TMPDIR'), "work-shared")
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        if sharedworkdir in plain:
            pdir = plain.replace(sharedworkdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        bb.utils.rename(plain, pdir)
        if fixtime:
            fixtimestamp(pdir, "")
            for walkroot, dirs, files in os.walk(pdir):
                for file in files + dirs:
                    fixtimestamp(walkroot, file)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_INSTDIR', sstatebuild)

    if d.getVar('SSTATE_SKIP_CREATION') == '1':
        return

    sstate_create_package = ['sstate_report_unihash', 'sstate_create_package']
    if d.getVar('SSTATE_SIG_KEY'):
        sstate_create_package.append('sstate_sign_package')

    for f in (d.getVar('SSTATECREATEFUNCS') or '').split() + \
             sstate_create_package + \
             (d.getVar('SSTATEPOSTCREATEFUNCS') or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    # SSTATE_PKG may have been changed by sstate_report_unihash
    siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
    if not os.path.exists(siginfo):
        bb.siggen.dump_this_task(siginfo, d)
    else:
        try:
            os.utime(siginfo, None)
        except PermissionError:
            pass
        except OSError as e:
            # Handle read-only file systems gracefully
            import errno
            if e.errno != errno.EROFS:
                raise e

    return

sstate_package[vardepsexclude] += "SSTATE_SIG_KEY"

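# Attempt to fetch an sstate object (plus its .siginfo and, if signature
# verification is enabled, its .sig) from SSTATE_MIRRORS into SSTATE_DIR.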
def pstaging_fetch(sstatefetch, d):
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS')
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)
    localdata.setVar('SRCPV', d.getVar('SRCPV'))

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
            bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror; if it fails, just return and
    # we will build the package
    uris = ['file://{0};downloadfilename={0}'.format(sstatefetch),
            'file://{0}.siginfo;downloadfilename={0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG"), False):
        uris += ['file://{0}.sig;downloadfilename={0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.checkstatus()
            fetcher.download()

        except bb.fetch2.BBFetchException:
            pass

pstaging_fetch[vardepsexclude] += "SRCPV"


def sstate_setscene(d):
    shared_state = sstate_state_fromvars(d)
    accelerate = sstate_installpkg(shared_state, d)
    if not accelerate:
        msg = "No sstate archive obtainable, will run full task instead."
        bb.warn(msg)
        raise bb.BBHandledException(msg)

python sstate_task_prefunc () {
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"

python sstate_task_postfunc () {
    shared_state = sstate_state_fromvars(d)

    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR"),))

    omask = os.umask(0o002)
    if omask != 0o002:
        bb.note("Using umask 0o002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)

    sstateinst = d.getVar("SSTATE_INSTDIR")
    d.setVar('SSTATE_FIXMEDIR', shared_state['fixmedir'])

    sstate_installpkgdir(shared_state, d)

    bb.utils.remove(d.getVar("SSTATE_BUILDDIR"), recurse=True)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"


#
# Shell function to generate an sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
    # Exit early if it already exists
    if [ -e ${SSTATE_PKG} ]; then
        touch ${SSTATE_PKG} 2>/dev/null || true
        return
    fi

    mkdir --mode=0775 -p `dirname ${SSTATE_PKG}`
    TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`

    OPT="-cS"
    ZSTD="zstd -${SSTATE_ZSTD_CLEVEL} -T${ZSTD_THREADS}"
    # Use pzstd if available
    if [ -x "$(command -v pzstd)" ]; then
        ZSTD="pzstd -${SSTATE_ZSTD_CLEVEL} -p ${ZSTD_THREADS}"
    fi

    # Need to handle empty directories
    if [ "$(ls -A)" ]; then
        set +e
        tar -I "$ZSTD" $OPT -f $TFILE *
        ret=$?
        if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
            exit 1
        fi
        set -e
    else
        tar -I "$ZSTD" $OPT --file=$TFILE --files-from=/dev/null
    fi
    chmod 0664 $TFILE
    # Skip if it was already created by some other process
    if [ -h ${SSTATE_PKG} ] && [ ! -e ${SSTATE_PKG} ]; then
        # There is a symbolic link, but it links to nothing.
        # Forcefully replace it with the new file.
        ln -f $TFILE ${SSTATE_PKG} || true
    elif [ ! -e ${SSTATE_PKG} ]; then
        # Move into place using ln to attempt an atomic op.
        # Abort if it already exists
        ln $TFILE ${SSTATE_PKG} || true
    else
        touch ${SSTATE_PKG} 2>/dev/null || true
    fi
    rm $TFILE
}

python sstate_sign_package () {
    from oe.gpg_sign import get_signer


    signer = get_signer(d, 'local')
    sstate_pkg = d.getVar('SSTATE_PKG')
    if os.path.exists(sstate_pkg + '.sig'):
        os.unlink(sstate_pkg + '.sig')
    signer.detach_sign(sstate_pkg, d.getVar('SSTATE_SIG_KEY', False), None,
                       d.getVar('SSTATE_SIG_PASSPHRASE'), armor=False)
}

python sstate_report_unihash() {
    report_unihash = getattr(bb.parse.siggen, 'report_unihash', None)

    if report_unihash:
        ss = sstate_state_fromvars(d)
        report_unihash(os.getcwd(), ss['task'], d)
}

#
# Shell function to decompress and prepare a package for installation
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
    ZSTD="zstd -T${ZSTD_THREADS}"
    # Use pzstd if available
    if [ -x "$(command -v pzstd)" ]; then
        ZSTD="pzstd -p ${ZSTD_THREADS}"
    fi

    tar -I "$ZSTD" -xvpf ${SSTATE_PKG}
    # update .siginfo atime on local/NFS mirror if it is a symbolic link
    [ ! -h ${SSTATE_PKG}.siginfo ] || [ ! -e ${SSTATE_PKG}.siginfo ] || touch -a ${SSTATE_PKG}.siginfo 2>/dev/null || true
    # update each symbolic link instead of any referenced file
    touch --no-dereference ${SSTATE_PKG} 2>/dev/null || true
    [ ! -e ${SSTATE_PKG}.sig ] || touch --no-dereference ${SSTATE_PKG}.sig 2>/dev/null || true
    [ ! -e ${SSTATE_PKG}.siginfo ] || touch --no-dereference ${SSTATE_PKG}.siginfo 2>/dev/null || true
}

BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

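# Called via BB_HASHCHECK_FUNCTION to determine which setscene tasks already
# have an sstate object available, first in the local SSTATE_DIR and then on
# any configured SSTATE_MIRRORS.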
def sstate_checkhashes(sq_data, d, siginfo=False, currentcount=0, summary=True, **kwargs):
    found = set()
    missed = set()

    def gethash(task):
        return sq_data['unihash'][task]

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_data['hashfn'][task].split(" ")
        spec = splithashfn[1]
        if splithashfn[0] == "True":
            extrapath = d.getVar("NATIVELSBSTRING") + "/"
        else:
            extrapath = ""

        tname = bb.runqueue.taskname_from_tid(task)[3:]

        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname

    def getsstatefile(tid, siginfo, d):
        spec, extrapath, tname = getpathcomponents(tid, d)
        return extrapath + generate_sstatefn(spec, gethash(tid), tname, siginfo, d)

    for tid in sq_data['hash']:

        sstatefile = d.expand("${SSTATE_DIR}/" + getsstatefile(tid, siginfo, d))

        if os.path.exists(sstatefile):
            found.add(tid)
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
        else:
            missed.add(tid)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    foundLocal = len(found)
    mirrors = d.getVar("SSTATE_MIRRORS")
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if bb.utils.to_boolean(localdata.getVar('BB_NO_NETWORK')) and \
                bb.utils.to_boolean(localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK')):
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init():
            while not connection_cache_pool.full():
                connection_cache_pool.put(FetchConnectionCache())

        def checkstatus_end():
            while not connection_cache_pool.empty():
                connection_cache = connection_cache_pool.get()
                connection_cache.close_connections()

        def checkstatus(arg):
            (tid, sstatefile) = arg

            connection_cache = connection_cache_pool.get()
            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            import traceback

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                                          connection_cache=connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                found.add(tid)
                missed.remove(tid)
            except bb.fetch2.FetchError as e:
                bb.debug(2, "SState: Unsuccessful fetch test for %s (%s)\n%s" % (srcuri, repr(e), traceback.format_exc()))
            except Exception as e:
                bb.error("SState: cannot test %s: %s\n%s" % (srcuri, repr(e), traceback.format_exc()))

            connection_cache_pool.put(connection_cache)

            if progress:
                bb.event.fire(bb.event.ProcessProgress(msg, len(tasklist) - thread_worker.tasks.qsize()), d)

        tasklist = []
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, siginfo, d))
            tasklist.append((tid, sstatefile))

        if tasklist:
            nproc = min(int(d.getVar("BB_NUMBER_THREADS")), len(tasklist))

            progress = len(tasklist) >= 100
            if progress:
                msg = "Checking sstate mirror object availability"
                bb.event.fire(bb.event.ProcessStarted(msg, len(tasklist)), d)

            # Have to setup the fetcher environment here rather than in each thread as it would race
            fetcherenv = bb.fetch2.get_fetcher_environment(d)
            with bb.utils.environment(**fetcherenv):
                bb.event.enable_threadlock()
                import concurrent.futures
                from queue import Queue
                connection_cache_pool = Queue(nproc)
                checkstatus_init()
                with concurrent.futures.ThreadPoolExecutor(max_workers=nproc) as executor:
                    executor.map(checkstatus, tasklist.copy())
                checkstatus_end()
                bb.event.disable_threadlock()

            if progress:
                bb.event.fire(bb.event.ProcessFinished(msg), d)

    inheritlist = d.getVar("INHERIT")
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []};
        for tid in missed:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['missed'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        for tid in found:
            sstatefile = d.expand(getsstatefile(tid, False, d))
            evdata['found'].append((bb.runqueue.fn_from_tid(tid), bb.runqueue.taskname_from_tid(tid), gethash(tid), sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if summary:
        # Print some summary statistics about the current task completion and how much sstate
        # reuse there was. Avoid divide by zero errors.
        total = len(sq_data['hash'])
        complete = 0
        if currentcount:
            complete = (len(found) + currentcount) / (total + currentcount) * 100
        match = 0
        if total:
            match = len(found) / total * 100
        bb.plain("Sstate summary: Wanted %d Local %d Mirrors %d Missed %d Current %d (%d%% match, %d%% complete)" %
                 (total, foundLocal, len(found)-foundLocal, len(missed), currentcount, match, complete))

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(sq_data, missed, found, d)

    return found
setscene_depvalid[vardepsexclude] = "SSTATE_EXCLUDEDEPS_SYSROOT"

BB_SETSCENE_DEPVALID = "setscene_depvalid"

def setscene_depvalid(task, taskdependees, notneeded, d, log=None):
    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too
    # Return - False - We need this dependency
    #        - True - We can skip this dependency
    import re

    def logit(msg, log):
        if log is not None:
            log.append(msg)
        else:
            bb.debug(2, msg)

    logit("Considering setscene task: %s" % (str(taskdependees[task])), log)

    directtasks = ["do_populate_lic", "do_deploy_source_date_epoch", "do_shared_workdir", "do_stash_locale", "do_gcc_stash_builddir", "do_create_spdx"]

    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x or x.endswith("-cross")

    # We only need to trigger deploy_source_date_epoch through direct dependencies
    if taskdependees[task][1] in directtasks:
        return True

    # We only need to trigger packagedata through direct dependencies
    # but need to preserve packagedata on packagedata links
    if taskdependees[task][1] == "do_packagedata":
        for dep in taskdependees:
            if taskdependees[dep][1] == "do_packagedata":
                return False
        return True

    for dep in taskdependees:
        logit(" considering dependency: %s" % (str(taskdependees[dep])), log)
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package don't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* need do_populate_sysroot as they're mainly postinstall dependencies
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
            return False
        # do_package/packagedata/package_qa/deploy don't need do_populate_sysroot
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_packagedata', 'do_package_qa', 'do_deploy']:
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # This is due to the [depends] in useradd.bbclass complicating matters
        # The logic *is* reversed here due to the way hard setscene dependencies are injected
        if (taskdependees[task][1] == 'do_package' or taskdependees[task][1] == 'do_populate_sysroot') and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # Allow excluding certain recursive dependencies. If a recipe needs it, it should add a
            # specific dependency itself, rather than relying on one of its dependees to pull
            # them in.
            # See also http://lists.openembedded.org/pipermail/openembedded-core/2018-January/146324.html
            not_needed = False
            excludedeps = d.getVar('_SSTATE_EXCLUDEDEPS_SYSROOT')
            if excludedeps is None:
                # Cache the regular expressions for speed
                excludedeps = []
                for excl in (d.getVar('SSTATE_EXCLUDEDEPS_SYSROOT') or "").split():
                    excludedeps.append((re.compile(excl.split('->', 1)[0]), re.compile(excl.split('->', 1)[1])))
                d.setVar('_SSTATE_EXCLUDEDEPS_SYSROOT', excludedeps)
            for excl in excludedeps:
                if excl[0].match(taskdependees[dep][0]):
                    if excl[1].match(taskdependees[task][0]):
                        not_needed = True
                        break
            if not_needed:
                continue
            # For meta-extsdk-toolchain we want all sysroot dependencies
            if taskdependees[dep][0] == 'meta-extsdk-toolchain':
                return False
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            # Add an exception for shadow-native as required by useradd.bbclass
            if isNativeCross(taskdependees[task][0]) and taskdependees[task][0] != 'shadow-native':
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[dep][1] in directtasks:
            continue

        # Safe fallthrough default
        logit(" Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])), log)
        return False
    return True

addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    d = e.data
    writtensstate = d.getVar('SSTATE_CURRTASK')
    if not writtensstate:
        taskname = d.getVar("BB_RUNTASK")[3:]
        spec = d.getVar('SSTATE_PKGSPEC')
        swspec = d.getVar('SSTATE_SWSPEC')
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        d.setVar("SSTATE_CURRTASK", taskname)
        siginfo = d.getVar('SSTATE_PKG') + ".siginfo"
        if not os.path.exists(siginfo):
            bb.siggen.dump_this_task(siginfo, d)
        else:
            try:
                os.utime(siginfo, None)
            except PermissionError:
                pass
            except OSError as e:
                # Handle read-only file systems gracefully
                import errno
                if e.errno != errno.EROFS:
                    raise e

}

SSTATE_PRUNE_OBSOLETEWORKDIR ?= "1"

#
# Event handler which removes manifests and stamp files for recipes which are no
# longer 'reachable' in a build where they once were. 'Reachable' refers to
# whether a recipe is parsed so recipes in a layer which was removed would no
# longer be reachable. Switching between systemd and sysvinit where recipes
# became skipped would be another example.
#
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler_reachablestamps
sstate_eventhandler_reachablestamps[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler_reachablestamps() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    preservestampfile = d.expand('${SSTATE_MANIFESTS}/preserve-stamps')
    preservestamps = []
    if os.path.exists(preservestampfile):
        with open(preservestampfile, 'r') as f:
            preservestamps = f.readlines()
    seen = []

    # The machine index contains all the stamps this machine has ever seen in this build directory.
    # We should only remove things which this machine once accessed but no longer does.
    machineindex = set()
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))
    mi = d.expand("${SSTATE_MANIFESTS}/index-machine-${MACHINE}")
    if os.path.exists(mi):
        with open(mi, "r") as f:
            machineindex = set(line.strip() for line in f.readlines())

    for a in sorted(list(set(d.getVar("SSTATE_ARCHS").split()))):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        manseen = set()
        ignore = []
        with open(i, "r") as f:
            lines = f.readlines()
            for l in reversed(lines):
                try:
                    (stamp, manifest, workdir) = l.split()
                    # The index may have multiple entries for the same manifest as the code above only appends
                    # new entries and there may be an entry with matching manifest but differing version in stamp/workdir.
                    # The last entry in the list is the valid one, any earlier entries with matching manifests
                    # should be ignored.
                    if manifest in manseen:
                        ignore.append(l)
                        continue
                    manseen.add(manifest)
                    if stamp not in stamps and stamp not in preservestamps and stamp in machineindex:
                        toremove.append(l)
                        if stamp not in seen:
                            bb.debug(2, "Stamp %s is not reachable, removing related manifests" % stamp)
                            seen.append(stamp)
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d recipes from the %s sysroot" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    if m.endswith(".postrm"):
                        continue
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)

        with open(i, "w") as f:
            for l in lines:
                if l in ignore:
                    continue
                f.write(l)
    machineindex |= set(stamps)
    with open(mi, "w") as f:
        for l in machineindex:
            f.write(l + "\n")

    if preservestamps:
        os.remove(preservestampfile)
}


#
# Bitbake can generate an event showing which setscene tasks are 'stale',
# i.e. which ones will be rerun. These are ones where a stamp file is present but
# it is stale (e.g. taskhash doesn't match). With that list we can go through
# the manifests for matching tasks and "uninstall" those manifests now. We do
# this now rather than mid build since the distribution of files between sstate
# objects may have changed, new tasks may run first and if those new tasks overlap
# with the stale tasks, we'd see overlapping files messages and failures. Thankfully
# removing these files is fast.
#
addhandler sstate_eventhandler_stalesstate
sstate_eventhandler_stalesstate[eventmask] = "bb.event.StaleSetSceneTasks"
python sstate_eventhandler_stalesstate() {
    d = e.data
    tasks = e.tasks

    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    for a in list(set(d.getVar("SSTATE_ARCHS").split())):
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                try:
                    (stamp, manifest, workdir) = l.split()
                    for tid in tasks:
                        for s in tasks[tid]:
                            if s.startswith(stamp):
                                taskname = bb.runqueue.taskname_from_tid(tid)[3:]
                                manname = manifest + "." + taskname
                                if os.path.exists(manname):
                                    bb.debug(2, "Sstate for %s is stale, removing related manifest %s" % (tid, manname))
                                    toremove.append((manname, tid, tasks[tid]))
                                    break
                except ValueError:
                    bb.fatal("Invalid line '%s' in sstate manifest '%s'" % (l, i))

        if toremove:
            msg = "Removing %d stale sstate objects for arch %s" % (len(toremove), a)
            bb.event.fire(bb.event.ProcessStarted(msg, len(toremove)), d)

            removed = 0
            for (manname, tid, stamps) in toremove:
                sstate_clean_manifest(manname, d)
                for stamp in stamps:
                    bb.utils.remove(stamp)
                removed = removed + 1
                bb.event.fire(bb.event.ProcessProgress(msg, removed), d)

            bb.event.fire(bb.event.ProcessFinished(msg), d)
}