SSTATE_VERSION = "3"

SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"

def generate_sstatefn(spec, hash, d):
    if not hash:
        hash = "INVALID"
    return hash[:2] + "/" + spec + hash
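# Objects are spread over two-character subdirectories named after the start of
# the task hash, e.g. (illustrative, with a made-up recipe and hash):
#   ${SSTATE_DIR}/ab/sstate:zlib:core2-64-poky-linux:1.2.8:r0:core2-64:3:ab12..._populate_sysroot.tgz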

SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${BPN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"

# We don't want the sstate to depend on things like the distro string
# of the system, we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""

# For multilib rpm the allarch packagegroup files can overwrite (in theory they're identical)
SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"

SSTATE_SCAN_FILES ?= "*.la *-config *_config"
SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'
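# With the default SSTATE_SCAN_FILES above this expands to roughly:
#   find ${SSTATE_BUILDDIR} \( -name "*.la" -o -name "*-config" -o -name "*_config" \) -type f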

BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${BUILD_ARCH}_${TARGET_ARCH} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${PACKAGE_ARCH} \
    allarch \
    ${PACKAGE_ARCH} \
    ${MACHINE}"
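# SSTATE_ARCHS lists the sstate package architectures a build can produce;
# the per-arch manifest index files are named after these values and are
# walked, for example, by sstate_eventhandler2 below when pruning stale
# manifests.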

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

SSTATECREATEFUNCS = "sstate_hardcode_path"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
EXTRA_STAGING_FIXMES ?= ""

SIGGEN_LOCKEDSIGS_CHECK_LEVEL ?= 'error'

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"
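# Example (illustrative) local.conf settings for signed sstate; the key ID and
# passphrase below are placeholders:
#   SSTATE_SIG_KEY = "0xDEADBEEF"
#   SSTATE_SIG_PASSPHRASE = "secret"
#   SSTATE_VERIFY_SIG = "1"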
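# The anonymous python below picks the SSTATE_PKGARCH namespace for the
# native/cross/crosssdk/nativesdk/cross-canadian/allarch cases and hooks the
# sstate pre/post functions into every task listed in SSTATETASKS.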
python () {
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")

    # These classes encode staging paths into their scripts data so can only be
    # reused if we manipulate the paths
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
        scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
        d.setVar('SSTATE_SCAN_CMD', scan_cmd)

    unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
}

def sstate_init(task, d):
    ss = {}
    ss['task'] = task
    ss['dirs'] = []
    ss['plaindirs'] = []
    ss['lockfiles'] = []
    ss['lockfiles-shared'] = []
    return ss

def sstate_state_fromvars(d, task = None):
    if task is None:
        task = d.getVar('BB_CURRENTTASK', True)
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    return ss

def sstate_add(ss, source, dest, d):
    if not source.endswith("/"):
        source = source + "/"
    if not dest.endswith("/"):
        dest = dest + "/"
    source = os.path.normpath(source)
    dest = os.path.normpath(dest)
    srcbase = os.path.basename(source)
    ss['dirs'].append([srcbase, source, dest])
    return ss

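# sstate_install stages a task's output into the shared area: it checks the
# candidate file list against the manifests of everything already staged
# (honouring SSTATE_DUPWHITELIST), writes a manifest for this task, records it
# in the per-arch index, and then hardlinks the files into place.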
def sstate_install(ss, d):
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in whitelist:
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output != "":
                    match.append("Matched in %s" % search_output.rstrip())
    if match:
        bb.error("The recipe %s is trying to install files into a shared " \
                 "area when those files already exist. Those files and their manifest " \
                 "location are:\n %s\nPlease verify which recipe should provide the " \
                 "above files.\nThe build has stopped as continuing in this scenario WILL " \
                 "break things, if not now, possibly in the future (we've seen builds fail " \
                 "several months later). If the system knew how to recover from this " \
                 "automatically it would, however there are several different scenarios " \
                 "which can result in this and we don't know which one this is. It may be " \
                 "you have switched providers of something like virtual/kernel (e.g. from " \
                 "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
                 "clean task for both recipes and it will resolve this error. It may be " \
                 "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
                 "those recipes should again resolve this error, however switching " \
                 "DISTRO_FEATURES on an existing build directory is not supported, you " \
                 "should really clean out tmp and rebuild (reusing sstate should be safe). " \
                 "It could be the overlapping files detected are harmless in which case " \
                 "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
                 "also be your build is including two different conflicting versions of " \
                 "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
                 "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
                 "sharing the error and filelist above." % \
                 (d.getVar('PN', True), "\n ".join(match)))
        bb.fatal("If the above message is too much, the simpler version is you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP", True) + " " + d2.getVar("SSTATE_MANFILEPREFIX", True) + " " + d.getVar("WORKDIR", True) + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(postinst, d, (sstateinst,))

    for lock in locks:
        bb.utils.unlockfile(lock)

sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"

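# sstate_installpkg locates (or fetches via pstaging_fetch) the .tgz for this
# task, optionally checks its GPG signature, unpacks it into a temporary
# sstate-install directory while running the pre-install/post-unpack hooks,
# then moves the results into place. Returns False if no usable package exists.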
def sstate_installpkg(ss, d):
    import oe.path
    import subprocess

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
    sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"

    if not os.path.exists(sstatepkg):
        pstaging_fetch(sstatefetch, sstatepkg, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Staging package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)
    d.setVar('SSTATE_PKG', sstatepkg)

    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
        if subprocess.call(["gpg", "--verify", sstatepkg + ".sig", sstatepkg]) != 0:
            bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)

    for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
        # All hooks should run in the SSTATE_INSTDIR
        bb.build.exec_func(f, d, (sstateinst,))

    for state in ss['dirs']:
        prepdir(state[1])
        os.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR', True)
        src = sstateinst + "/" + plain.replace(workdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        os.rename(src, dest)

    return True

python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR', True)
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging = d.getVar('STAGING_DIR', True)
        staging_target = d.getVar('STAGING_DIR_TARGET', True)
        staging_host = d.getVar('STAGING_DIR_HOST', True)

        if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
        elif bb.data.inherits_class('cross', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar, True)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}

def sstate_clean_cachefile(ss, d):
    import oe.path

    sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
    bb.note("Removing %s" % sstatepkgfile)
    oe.path.remove(sstatepkgfile)

def sstate_clean_cachefiles(d):
    for task in (d.getVar('SSTATETASKS', True) or "").split():
        ld = d.createCopy()
        ss = sstate_state_fromvars(ld, task)
        sstate_clean_cachefile(ss, ld)

def sstate_clean_manifest(manifest, d):
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
                    os.rmdir(entry[:-1])
            else:
                oe.path.remove(entry)
        except OSError:
            pass

    oe.path.remove(manifest)

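# sstate_clean removes everything a previous run of the task staged (via its
# manifest) plus the task's stamps, keeping sigdata and taint files, so the
# task can be reinstalled or rebuilt cleanly.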
def sstate_clean(ss, d):
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN", True)
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"

CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    bb.note("Removing shared state for package %s" % d.getVar('PN', True))

    manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS', True).split()
    for name in tasks:
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}

python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging = d.getVar('STAGING_DIR', True)
    staging_target = d.getVar('STAGING_DIR_TARGET', True)
    staging_host = d.getVar('STAGING_DIR_HOST', True)
    sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging)
        sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
    elif bb.data.inherits_class('cross', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
        sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
    else:
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
        sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar, True)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.call(sstate_hardcode_cmd, shell=True)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.call(sstate_filelist_relative_cmd, shell=True)
}

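# sstate_package builds the archive for a task: it hardlinks each output tree
# into a throwaway sstate-build directory, rewrites absolute TMPDIR symlinks as
# relative ones, then runs SSTATECREATEFUNCS (e.g. sstate_hardcode_path) and
# sstate_create_package to produce (and optionally sign) the .tgz.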
def sstate_package(ss, d):
    import oe.path

    def make_relative_symlink(path, outputpath, d):
        # Replace out absolute TMPDIR paths in symlinks with relative ones
        if not os.path.islink(path):
            return
        link = os.readlink(path)
        if not os.path.isabs(link):
            return
        if not link.startswith(tmpdir):
            return

        depth = outputpath.rpartition(tmpdir)[2].count('/')
        base = link.partition(tmpdir)[2].strip()
        while depth > 1:
            base = "/.." + base
            depth -= 1
        base = "." + base

        bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
        os.remove(path)
        os.symlink(base, path)

    tmpdir = d.getVar('TMPDIR', True)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    bb.utils.mkdirhier(os.path.dirname(sstatepkg))
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                make_relative_symlink(srcpath, dstpath, d)
            for dir in dirs:
                srcpath = os.path.join(walkroot, dir)
                dstpath = srcpath.replace(state[1], state[2])
                make_relative_symlink(srcpath, dstpath, d)
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        oe.path.copyhardlinktree(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR', True)
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        oe.path.copyhardlinktree(plain, pdir)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_PKG', sstatepkg)

    for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \
             (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
        # All hooks should run in SSTATE_BUILDDIR.
        bb.build.exec_func(f, d, (sstatebuild,))

    bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)

    return

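# pstaging_fetch tries to pull a missing sstate object (and its .siginfo and,
# when verification is enabled, its .sig) from SSTATE_MIRRORS into SSTATE_DIR
# before we fall back to building it. An illustrative local.conf mirror entry
# (placeholder URL) might look like:
#   SSTATE_MIRRORS ?= "file://.* http://sstate.example.com/cache/PATH"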
def pstaging_fetch(sstatefetch, sstatepkg, d):
    import bb.fetch2

    # Only try and fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS', True)
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)
    bb.data.update_data(localdata)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror, if it fails just return and
    # we will build the package
    uris = ['file://{0}'.format(sstatefetch),
            'file://{0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
        uris += ['file://{0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.download()

            # Need to optimise this, if using file:// urls, the fetcher just changes the local path
            # For now work around by symlinking
            localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
            if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
                os.symlink(localpath, sstatepkg)

        except bb.fetch2.BBFetchException:
            break

def sstate_setscene(d):
    shared_state = sstate_state_fromvars(d)
    accelerate = sstate_installpkg(shared_state, d)
    if not accelerate:
        raise bb.build.FuncFailed("No suitable staging package found")

python sstate_task_prefunc () {
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}
sstate_task_prefunc[dirs] = "${WORKDIR}"

python sstate_task_postfunc () {
    shared_state = sstate_state_fromvars(d)

    sstate_install(shared_state, d)
    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d, (d.getVar("WORKDIR", True),))
    omask = os.umask(002)
    if omask != 002:
        bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)
}
sstate_task_postfunc[dirs] = "${WORKDIR}"


#
# Shell function to generate a sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
    TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
    # Need to handle empty directories
    if [ "$(ls -A)" ]; then
        set +e
        tar -czf $TFILE *
        ret=$?
        if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
            exit 1
        fi
        set -e
    else
        tar -cz --file=$TFILE --files-from=/dev/null
    fi
    chmod 0664 $TFILE
    mv -f $TFILE ${SSTATE_PKG}

    if [ -n "${SSTATE_SIG_KEY}" ]; then
        rm -f ${SSTATE_PKG}.sig
        echo ${SSTATE_SIG_PASSPHRASE} | gpg --batch --passphrase-fd 0 --detach-sign --local-user ${SSTATE_SIG_KEY} --output ${SSTATE_PKG}.sig ${SSTATE_PKG}
    fi

    cd ${WORKDIR}
    rm -rf ${SSTATE_BUILDDIR}
}

#
# Shell function to decompress and prepare a package for installation
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
    tar -xmvzf ${SSTATE_PKG}
    # Use "! -w ||" to return true for read only files
    [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
}

BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

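# sstate_checkhashes is bitbake's BB_HASHCHECK_FUNCTION: given the candidate
# setscene tasks it returns the indices of those whose sstate object can be
# found either locally in SSTATE_DIR or on a configured SSTATE_MIRRORS entry.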
def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):

    ret = []
    missed = []
    extension = ".tgz"
    if siginfo:
        extension = extension + ".siginfo"

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_hashfn[task].split(" ")
        spec = splithashfn[1]
        extrapath = splithashfn[0]

        tname = sq_task[task][3:]

        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname


    for task in range(len(sq_fn)):

        spec, extrapath, tname = getpathcomponents(task, d)

        sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)

        if os.path.exists(sstatefile):
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
            ret.append(task)
            continue
        else:
            missed.append(task)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    mirrors = d.getVar("SSTATE_MIRRORS", True)
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)
        bb.data.update_data(localdata)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init(thread_worker):
            thread_worker.connection_cache = FetchConnectionCache()

        def checkstatus_end(thread_worker):
            thread_worker.connection_cache.close_connections()

        def checkstatus(thread_worker, arg):
            (task, sstatefile) = arg

            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                            connection_cache=thread_worker.connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                ret.append(task)
                if task in missed:
                    missed.remove(task)
            except:
                missed.append(task)
                bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
                pass

        tasklist = []
        for task in range(len(sq_fn)):
            if task in ret:
                continue
            spec, extrapath, tname = getpathcomponents(task, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
            tasklist.append((task, sstatefile))

        if tasklist:
            bb.note("Checking sstate mirror object availability (for %s objects)" % len(tasklist))
            import multiprocessing
            nproc = min(multiprocessing.cpu_count(), len(tasklist))

            pool = oe.utils.ThreadedPool(nproc, len(tasklist),
                    worker_init=checkstatus_init, worker_end=checkstatus_end)
            for t in tasklist:
                pool.add_task(checkstatus, t)
            pool.start()
            pool.wait_completion()

    inheritlist = d.getVar("INHERIT", True)
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []}
        for task in missed:
            spec, extrapath, tname = getpathcomponents(task, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
            evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
        for task in ret:
            spec, extrapath, tname = getpathcomponents(task, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
            evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)

    return ret

BB_SETSCENE_DEPVALID = "setscene_depvalid"

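# setscene_depvalid is bitbake's BB_SETSCENE_DEPVALID hook. Broadly speaking it
# returns True when none of the listed dependees genuinely need the output of
# 'task' installed from sstate, and falls through to False (dependency needed)
# in any case it does not recognise.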
def setscene_depvalid(task, taskdependees, notneeded, d):
    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too

    bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))

    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x

    def isPostInstDep(x):
        if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native"]:
            return True
        return False

    # We only need to trigger populate_lic through direct dependencies
    if taskdependees[task][1] == "do_populate_lic":
        return True

    for dep in taskdependees:
        bb.debug(2, " considering dependency: %s" % (str(taskdependees[dep])))
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package don't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* and do_package don't need do_populate_sysroot, unless it is a postinstall dependency
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
                return False
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # base-passwd/shadow-sysroot don't need their dependencies
            if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
                continue
            # Nothing need depend on libc-initial/gcc-cross-initial
            if "-initial" in taskdependees[task][0]:
                continue
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            if isNativeCross(taskdependees[task][0]):
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[task][1] == 'do_shared_workdir':
            continue

        # This is due to the [depends] in useradd.bbclass complicating matters
        # The logic *is* reversed here due to the way hard setscene dependencies are injected
        if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
            continue

        # Safe fallthrough default
        bb.debug(2, " Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
        return False
    return True

addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    d = e.data
    # When we write an sstate package we rewrite the SSTATE_PKG
    spkg = d.getVar('SSTATE_PKG', True)
    if not spkg.endswith(".tgz"):
        taskname = d.getVar("BB_RUNTASK", True)[3:]
        spec = d.getVar('SSTATE_PKGSPEC', True)
        swspec = d.getVar('SSTATE_SWSPEC', True)
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        sstatepkg = d.getVar('SSTATE_PKG', True)
        bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz.siginfo", d)
}

SSTATE_PRUNE_OBSOLETEWORKDIR = "1"

# Event handler which removes manifests and stamp files for
# recipes which are no longer reachable in a build where they
# once were.
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler2
sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler2() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    seen = []
    for a in d.getVar("SSTATE_ARCHS", True).split():
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                (stamp, manifest, workdir) = l.split()
                if stamp not in stamps:
                    toremove.append(l)
                    if stamp not in seen:
                        bb.note("Stamp %s is not reachable, removing related manifests" % stamp)
                        seen.append(stamp)
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
        with open(i, "w") as f:
            for l in lines:
                f.write(l)
}