SSTATE_VERSION = "3"

SSTATE_MANIFESTS ?= "${TMPDIR}/sstate-control"
SSTATE_MANFILEPREFIX = "${SSTATE_MANIFESTS}/manifest-${SSTATE_MANMACH}-${PN}"

def generate_sstatefn(spec, hash, d):
    if not hash:
        hash = "INVALID"
    return hash[:2] + "/" + spec + hash

SSTATE_PKGARCH = "${PACKAGE_ARCH}"
SSTATE_PKGSPEC = "sstate:${PN}:${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}:${PV}:${PR}:${SSTATE_PKGARCH}:${SSTATE_VERSION}:"
SSTATE_SWSPEC = "sstate:${BPN}::${PV}:${PR}::${SSTATE_VERSION}:"
SSTATE_PKGNAME = "${SSTATE_EXTRAPATH}${@generate_sstatefn(d.getVar('SSTATE_PKGSPEC', True), d.getVar('BB_TASKHASH', True), d)}"
SSTATE_PKG = "${SSTATE_DIR}/${SSTATE_PKGNAME}"
SSTATE_EXTRAPATH = ""
SSTATE_EXTRAPATHWILDCARD = ""
SSTATE_PATHSPEC = "${SSTATE_DIR}/${SSTATE_EXTRAPATHWILDCARD}*/${SSTATE_PKGSPEC}"

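# Illustrative example of the expansion above (the values are hypothetical,
# not produced by this class as-is): for a target recipe "zlib" with task hash
# "ab123456...", SSTATE_PKGNAME becomes something like
#   ab/sstate:zlib:i586-poky-linux:1.2.8:r0:i586:3:ab123456...
# and sstate_package() later appends "_<task>.tgz" when writing the archive
# under ${SSTATE_DIR}.
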
# We don't want the sstate to depend on things like the distro string
# of the system; we let the sstate paths take care of this.
SSTATE_EXTRAPATH[vardepvalue] = ""

# For multilib rpm the allarch packagegroup files can overwrite each other (in theory they're identical)
SSTATE_DUPWHITELIST = "${DEPLOY_DIR_IMAGE}/ ${DEPLOY_DIR}/licenses/ ${DEPLOY_DIR_RPM}/all/"
# Avoid docbook/sgml catalog warnings for now
SSTATE_DUPWHITELIST += "${STAGING_ETCDIR_NATIVE}/sgml ${STAGING_DATADIR_NATIVE}/sgml"
# Archive the sources for many architectures in one deploy folder
SSTATE_DUPWHITELIST += "${DEPLOY_DIR_SRC}"
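# A recipe or class that legitimately shares a destination with another can
# extend the whitelist itself; a minimal sketch (the path is illustrative):
#   SSTATE_DUPWHITELIST += "${DEPLOY_DIR}/my-shared-output/"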

SSTATE_SCAN_FILES ?= "*.la *-config *_config"
SSTATE_SCAN_CMD ?= 'find ${SSTATE_BUILDDIR} \( -name "${@"\" -o -name \"".join(d.getVar("SSTATE_SCAN_FILES", True).split())}" \) -type f'

BB_HASHFILENAME = "${SSTATE_EXTRAPATH} ${SSTATE_PKGSPEC} ${SSTATE_SWSPEC}"

SSTATE_ARCHS = " \
    ${BUILD_ARCH} \
    ${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS} \
    ${BUILD_ARCH}_${TARGET_ARCH} \
    ${SDK_ARCH}_${SDK_OS} \
    ${SDK_ARCH}_${PACKAGE_ARCH} \
    allarch \
    ${PACKAGE_ARCH} \
    ${MACHINE}"

SSTATE_MANMACH ?= "${SSTATE_PKGARCH}"

SSTATECREATEFUNCS = "sstate_hardcode_path"
SSTATEPOSTCREATEFUNCS = ""
SSTATEPREINSTFUNCS = ""
SSTATEPOSTUNPACKFUNCS = "sstate_hardcode_path_unpack"
SSTATEPOSTINSTFUNCS = ""
EXTRA_STAGING_FIXMES ?= ""

SIGGEN_LOCKEDSIGS_CHECK_LEVEL ?= 'error'

# The GnuPG key ID and passphrase to use to sign sstate archives (or unset to
# not sign)
SSTATE_SIG_KEY ?= ""
SSTATE_SIG_PASSPHRASE ?= ""
# Whether to verify the GnuPG signatures when extracting sstate archives
SSTATE_VERIFY_SIG ?= "0"

# Specify the directories in which the shell functions are executed; don't use
# ${B} as the default so we avoid possible races over ${B} with other tasks.
sstate_create_package[dirs] = "${SSTATE_BUILDDIR}"
sstate_unpack_package[dirs] = "${SSTATE_INSTDIR}"

# Do not run sstate_hardcode_path() in ${B}:
# ${B} may be removed by cmake_do_configure() while
# sstate_hardcode_path() is running.
sstate_hardcode_path[dirs] = "${SSTATE_BUILDDIR}"

python () {
    if bb.data.inherits_class('native', d):
        d.setVar('SSTATE_PKGARCH', d.getVar('BUILD_ARCH', False))
    elif bb.data.inherits_class('crosssdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${BUILD_ARCH}_${TARGET_ARCH}"))
    elif bb.data.inherits_class('nativesdk', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${SDK_OS}"))
    elif bb.data.inherits_class('cross-canadian', d):
        d.setVar('SSTATE_PKGARCH', d.expand("${SDK_ARCH}_${PACKAGE_ARCH}"))
    elif bb.data.inherits_class('allarch', d) and d.getVar("PACKAGE_ARCH", True) == "all":
        d.setVar('SSTATE_PKGARCH', "allarch")
    else:
        d.setVar('SSTATE_MANMACH', d.expand("${PACKAGE_ARCH}"))

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross', d):
        d.setVar('SSTATE_EXTRAPATH', "${NATIVELSBSTRING}/")
        d.setVar('SSTATE_EXTRAPATHWILDCARD', "*/")

    # These classes encode staging paths into their scripts data so they can
    # only be reused if we manipulate the paths.
    if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('sdk', d) or bb.data.inherits_class('crosssdk', d):
        scan_cmd = "grep -Irl ${STAGING_DIR} ${SSTATE_BUILDDIR}"
        d.setVar('SSTATE_SCAN_CMD', scan_cmd)

    unique_tasks = set((d.getVar('SSTATETASKS', True) or "").split())
    d.setVar('SSTATETASKS', " ".join(unique_tasks))
    for task in unique_tasks:
        d.prependVarFlag(task, 'prefuncs', "sstate_task_prefunc ")
        d.appendVarFlag(task, 'postfuncs', " sstate_task_postfunc")
}
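
# Illustrative outcome of the mapping above (the values are hypothetical): a
# "-native" recipe built on an x86_64 host gets SSTATE_PKGARCH = "x86_64",
# while a gcc-cross recipe targeting ARM gets SSTATE_PKGARCH = "x86_64_arm",
# keeping host- and target-specific sstate objects in separate namespaces.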

def sstate_init(task, d):
    ss = {}
    ss['task'] = task
    ss['dirs'] = []
    ss['plaindirs'] = []
    ss['lockfiles'] = []
    ss['lockfiles-shared'] = []
    return ss

def sstate_state_fromvars(d, task = None):
    if task is None:
        task = d.getVar('BB_CURRENTTASK', True)
        if not task:
            bb.fatal("sstate code running without task context?!")
        task = task.replace("_setscene", "")

    if task.startswith("do_"):
        task = task[3:]
    inputs = (d.getVarFlag("do_" + task, 'sstate-inputdirs', True) or "").split()
    outputs = (d.getVarFlag("do_" + task, 'sstate-outputdirs', True) or "").split()
    plaindirs = (d.getVarFlag("do_" + task, 'sstate-plaindirs', True) or "").split()
    lockfiles = (d.getVarFlag("do_" + task, 'sstate-lockfile', True) or "").split()
    lockfilesshared = (d.getVarFlag("do_" + task, 'sstate-lockfile-shared', True) or "").split()
    interceptfuncs = (d.getVarFlag("do_" + task, 'sstate-interceptfuncs', True) or "").split()
    if not task or len(inputs) != len(outputs):
        bb.fatal("sstate variables not setup correctly?!")

    if task == "populate_lic":
        d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
        d.setVar("SSTATE_EXTRAPATH", "")

    ss = sstate_init(task, d)
    for i in range(len(inputs)):
        sstate_add(ss, inputs[i], outputs[i], d)
    ss['lockfiles'] = lockfiles
    ss['lockfiles-shared'] = lockfilesshared
    ss['plaindirs'] = plaindirs
    ss['interceptfuncs'] = interceptfuncs
    return ss
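
# For reference, a task opts into shared state by setting the varflags read
# above and adding itself to SSTATETASKS. A minimal sketch modelled on
# deploy-style tasks (the directory names are illustrative, not mandated by
# this class):
#   SSTATETASKS += "do_deploy"
#   do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"
#   do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"
#   addtask do_deploy_setscene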

def sstate_add(ss, source, dest, d):
    if not source.endswith("/"):
        source = source + "/"
    if not dest.endswith("/"):
        dest = dest + "/"
    source = os.path.normpath(source)
    dest = os.path.normpath(dest)
    srcbase = os.path.basename(source)
    ss['dirs'].append([srcbase, source, dest])
    return ss

def sstate_install(ss, d):
    import oe.path
    import oe.sstatesig
    import subprocess

    sharedfiles = []
    shareddirs = []
    bb.utils.mkdirhier(d.expand("${SSTATE_MANIFESTS}"))

    manifest, d2 = oe.sstatesig.sstate_get_manifest_filename(ss['task'], d)

    if os.access(manifest, os.R_OK):
        bb.fatal("Package already staged (%s)?!" % manifest)

    locks = []
    for lock in ss['lockfiles-shared']:
        locks.append(bb.utils.lockfile(lock, True))
    for lock in ss['lockfiles']:
        locks.append(bb.utils.lockfile(lock))

    for state in ss['dirs']:
        bb.debug(2, "Staging files from %s to %s" % (state[1], state[2]))
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcpath, dstpath))
                sharedfiles.append(dstpath)
            for dir in dirs:
                srcdir = os.path.join(walkroot, dir)
                dstdir = srcdir.replace(state[1], state[2])
                #bb.debug(2, "Staging %s to %s" % (srcdir, dstdir))
                if not dstdir.endswith("/"):
                    dstdir = dstdir + "/"
                shareddirs.append(dstdir)

    # Check the file list for conflicts against files which already exist
    whitelist = (d.getVar("SSTATE_DUPWHITELIST", True) or "").split()
    match = []
    for f in sharedfiles:
        if os.path.exists(f):
            f = os.path.normpath(f)
            realmatch = True
            for w in whitelist:
                if f.startswith(w):
                    realmatch = False
                    break
            if realmatch:
                match.append(f)
                sstate_search_cmd = "grep -rl '%s' %s --exclude=master.list | sed -e 's:^.*/::' -e 's:\.populate-sysroot::'" % (f, d.expand("${SSTATE_MANIFESTS}"))
                search_output = subprocess.Popen(sstate_search_cmd, shell=True, stdout=subprocess.PIPE).communicate()[0]
                if search_output != "":
                    match.append("Matched in %s" % search_output.rstrip())
    if match:
        bb.error("The recipe %s is trying to install files into a shared " \
          "area when those files already exist. Those files and their manifest " \
          "location are:\n %s\nPlease verify which recipe should provide the " \
          "above files.\nThe build has stopped as continuing in this scenario WILL " \
          "break things, if not now, possibly in the future (we've seen builds fail " \
          "several months later). If the system knew how to recover from this " \
          "automatically it would, however there are several different scenarios " \
          "which can result in this and we don't know which one this is. It may be " \
          "you have switched providers of something like virtual/kernel (e.g. from " \
          "linux-yocto to linux-yocto-dev), in that case you need to execute the " \
          "clean task for both recipes and it will resolve this error. It may be " \
          "you changed DISTRO_FEATURES from systemd to udev or vice versa. Cleaning " \
          "those recipes should again resolve this error however switching " \
          "DISTRO_FEATURES on an existing build directory is not supported, you " \
          "should really clean out tmp and rebuild (reusing sstate should be safe). " \
          "It could be the overlapping files detected are harmless in which case " \
          "adding them to SSTATE_DUPWHITELIST may be the correct solution. It could " \
          "also be your build is including two different conflicting versions of " \
          "things (e.g. bluez 4 and bluez 5) and the correct solution for that would " \
          "be to resolve the conflict. If in doubt, please ask on the mailing list, " \
          "sharing the error and filelist above." % \
          (d.getVar('PN', True), "\n ".join(match)))
        bb.fatal("If the above message is too much, the simpler version is that you're advised to wipe out tmp and rebuild (reusing sstate is fine). That will likely fix things in most (but not all) cases.")

    # Write out the manifest
    f = open(manifest, "w")
    for file in sharedfiles:
        f.write(file + "\n")

    # We want to ensure that directories appear at the end of the manifest
    # so that when we test to see if they should be deleted any contents
    # added by the task will have been removed first.
    dirs = sorted(shareddirs, key=len)
    # Must remove children first, which will have a longer path than the parent
    for di in reversed(dirs):
        f.write(di + "\n")
    f.close()

    # Append to the list of manifests for this PACKAGE_ARCH

    i = d2.expand("${SSTATE_MANIFESTS}/index-${SSTATE_MANMACH}")
    l = bb.utils.lockfile(i + ".lock")
    filedata = d.getVar("STAMP", True) + " " + d2.getVar("SSTATE_MANFILEPREFIX", True) + " " + d.getVar("WORKDIR", True) + "\n"
    manifests = []
    if os.path.exists(i):
        with open(i, "r") as f:
            manifests = f.readlines()
    if filedata not in manifests:
        with open(i, "a+") as f:
            f.write(filedata)
    bb.utils.unlockfile(l)

    # Run the actual file install
    for state in ss['dirs']:
        if os.path.exists(state[1]):
            oe.path.copyhardlinktree(state[1], state[2])

    for postinst in (d.getVar('SSTATEPOSTINSTFUNCS', True) or '').split():
        bb.build.exec_func(postinst, d)

    for lock in locks:
        bb.utils.unlockfile(lock)

sstate_install[vardepsexclude] += "SSTATE_DUPWHITELIST SSTATE_MANMACH SSTATE_MANFILEPREFIX"
sstate_install[vardeps] += "${SSTATEPOSTINSTFUNCS}"

def sstate_installpkg(ss, d):
    import oe.path
    import subprocess

    def prepdir(dir):
        # remove dir if it exists, ensure any parent directories do exist
        if os.path.exists(dir):
            oe.path.remove(dir)
        bb.utils.mkdirhier(dir)
        oe.path.remove(dir)

    sstateinst = d.expand("${WORKDIR}/sstate-install-%s/" % ss['task'])
    sstatefetch = d.getVar('SSTATE_PKGNAME', True) + '_' + ss['task'] + ".tgz"
    sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"

    if not os.path.exists(sstatepkg):
        pstaging_fetch(sstatefetch, sstatepkg, d)

    if not os.path.isfile(sstatepkg):
        bb.note("Staging package %s does not exist" % sstatepkg)
        return False

    sstate_clean(ss, d)

    d.setVar('SSTATE_INSTDIR', sstateinst)
    d.setVar('SSTATE_PKG', sstatepkg)

    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
        if subprocess.call(["gpg", "--verify", sstatepkg + ".sig", sstatepkg]) != 0:
            bb.warn("Cannot verify signature on sstate package %s" % sstatepkg)

    for f in (d.getVar('SSTATEPREINSTFUNCS', True) or '').split() + ['sstate_unpack_package'] + (d.getVar('SSTATEPOSTUNPACKFUNCS', True) or '').split():
        bb.build.exec_func(f, d)

    for state in ss['dirs']:
        prepdir(state[1])
        os.rename(sstateinst + state[0], state[1])
    sstate_install(ss, d)

    for plain in ss['plaindirs']:
        workdir = d.getVar('WORKDIR', True)
        src = sstateinst + "/" + plain.replace(workdir, '')
        dest = plain
        bb.utils.mkdirhier(src)
        prepdir(dest)
        os.rename(src, dest)

    return True

python sstate_hardcode_path_unpack () {
    # Fixup hardcoded paths
    #
    # Note: The logic below must match the reverse logic in
    # sstate_hardcode_path(d)
    import subprocess

    sstateinst = d.getVar('SSTATE_INSTDIR', True)
    fixmefn = sstateinst + "fixmepath"
    if os.path.isfile(fixmefn):
        staging = d.getVar('STAGING_DIR', True)
        staging_target = d.getVar('STAGING_DIR_TARGET', True)
        staging_host = d.getVar('STAGING_DIR_HOST', True)

        if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIR:%s:g'" % (staging)
        elif bb.data.inherits_class('cross', d):
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRTARGET:%s:g; s:FIXMESTAGINGDIR:%s:g'" % (staging_target, staging)
        else:
            sstate_sed_cmd = "sed -i -e 's:FIXMESTAGINGDIRHOST:%s:g'" % (staging_host)

        extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
        for fixmevar in extra_staging_fixmes.split():
            fixme_path = d.getVar(fixmevar, True)
            sstate_sed_cmd += " -e 's:FIXME_%s:%s:g'" % (fixmevar, fixme_path)

        # Add sstateinst to each filename in fixmepath, use xargs to efficiently call sed
        sstate_hardcode_cmd = "sed -e 's:^:%s:g' %s | xargs %s" % (sstateinst, fixmefn, sstate_sed_cmd)

        bb.note("Replacing fixme paths in sstate package: %s" % (sstate_hardcode_cmd))
        subprocess.call(sstate_hardcode_cmd, shell=True)

        # Need to remove this or we'd copy it into the target directory and may
        # conflict with another writer
        os.remove(fixmefn)
}
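
# Rough sketch of the round trip handled by sstate_hardcode_path() further
# below and the unpack function above (the paths are illustrative): at
# packaging time an absolute path such as
#   /build/tmp/sysroots/x86_64-linux/usr/bin/foo-config
# is rewritten to FIXMESTAGINGDIR/usr/bin/foo-config and the file is recorded
# in "fixmepath"; at unpack time the sed expressions above substitute the
# current STAGING_DIR* values back in.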

def sstate_clean_cachefile(ss, d):
    import oe.path

    sstatepkgfile = d.getVar('SSTATE_PATHSPEC', True) + "*_" + ss['task'] + ".tgz*"
    bb.note("Removing %s" % sstatepkgfile)
    oe.path.remove(sstatepkgfile)

def sstate_clean_cachefiles(d):
    for task in (d.getVar('SSTATETASKS', True) or "").split():
        ld = d.createCopy()
        ss = sstate_state_fromvars(ld, task)
        sstate_clean_cachefile(ss, ld)

def sstate_clean_manifest(manifest, d):
    import oe.path

    mfile = open(manifest)
    entries = mfile.readlines()
    mfile.close()

    for entry in entries:
        entry = entry.strip()
        bb.debug(2, "Removing manifest: %s" % entry)
        # We can race against another package populating directories as we're removing them
        # so we ignore errors here.
        try:
            if entry.endswith("/"):
                if os.path.islink(entry[:-1]):
                    os.remove(entry[:-1])
                elif os.path.exists(entry) and len(os.listdir(entry)) == 0:
                    os.rmdir(entry[:-1])
            else:
                oe.path.remove(entry)
        except OSError:
            pass

    oe.path.remove(manifest)

def sstate_clean(ss, d):
    import oe.path
    import glob

    d2 = d.createCopy()
    stamp_clean = d.getVar("STAMPCLEAN", True)
    extrainf = d.getVarFlag("do_" + ss['task'], 'stamp-extra-info', True)
    if extrainf:
        d2.setVar("SSTATE_MANMACH", extrainf)
        wildcard_stfile = "%s.do_%s*.%s" % (stamp_clean, ss['task'], extrainf)
    else:
        wildcard_stfile = "%s.do_%s*" % (stamp_clean, ss['task'])

    manifest = d2.expand("${SSTATE_MANFILEPREFIX}.%s" % ss['task'])

    if os.path.exists(manifest):
        locks = []
        for lock in ss['lockfiles-shared']:
            locks.append(bb.utils.lockfile(lock))
        for lock in ss['lockfiles']:
            locks.append(bb.utils.lockfile(lock))

        sstate_clean_manifest(manifest, d)

        for lock in locks:
            bb.utils.unlockfile(lock)

    # Remove the current and previous stamps, but keep the sigdata.
    #
    # The glob() matches do_task* which may match multiple tasks, for
    # example: do_package and do_package_write_ipk, so we need to
    # exactly match *.do_task.* and *.do_task_setscene.*
    rm_stamp = '.do_%s.' % ss['task']
    rm_setscene = '.do_%s_setscene.' % ss['task']
    # For BB_SIGNATURE_HANDLER = "noop"
    rm_nohash = ".do_%s" % ss['task']
    for stfile in glob.glob(wildcard_stfile):
        # Keep the sigdata
        if ".sigdata." in stfile:
            continue
        # Preserve taint files in the stamps directory
        if stfile.endswith('.taint'):
            continue
        if rm_stamp in stfile or rm_setscene in stfile or \
                stfile.endswith(rm_nohash):
            oe.path.remove(stfile)

sstate_clean[vardepsexclude] = "SSTATE_MANFILEPREFIX"

CLEANFUNCS += "sstate_cleanall"

python sstate_cleanall() {
    bb.note("Removing shared state for package %s" % d.getVar('PN', True))

    manifest_dir = d.getVar('SSTATE_MANIFESTS', True)
    if not os.path.exists(manifest_dir):
        return

    tasks = d.getVar('SSTATETASKS', True).split()
    for name in tasks:
        ld = d.createCopy()
        shared_state = sstate_state_fromvars(ld, name)
        sstate_clean(shared_state, ld)
}

python sstate_hardcode_path () {
    import subprocess, platform

    # Need to remove hardcoded paths and fix these when we install the
    # staging packages.
    #
    # Note: the logic in this function needs to match the reverse logic
    # in sstate_installpkg(ss, d)

    staging = d.getVar('STAGING_DIR', True)
    staging_target = d.getVar('STAGING_DIR_TARGET', True)
    staging_host = d.getVar('STAGING_DIR_HOST', True)
    sstate_builddir = d.getVar('SSTATE_BUILDDIR', True)

    if bb.data.inherits_class('native', d) or bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('cross-canadian', d):
        sstate_grep_cmd = "grep -l -e '%s'" % (staging)
        sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIR:g'" % (staging)
    elif bb.data.inherits_class('cross', d):
        sstate_grep_cmd = "grep -l -e '%s' -e '%s'" % (staging_target, staging)
        sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRTARGET:g; s:%s:FIXMESTAGINGDIR:g'" % (staging_target, staging)
    else:
        sstate_grep_cmd = "grep -l -e '%s'" % (staging_host)
        sstate_sed_cmd = "sed -i -e 's:%s:FIXMESTAGINGDIRHOST:g'" % (staging_host)

    extra_staging_fixmes = d.getVar('EXTRA_STAGING_FIXMES', True) or ''
    for fixmevar in extra_staging_fixmes.split():
        fixme_path = d.getVar(fixmevar, True)
        sstate_sed_cmd += " -e 's:%s:FIXME_%s:g'" % (fixme_path, fixmevar)

    fixmefn = sstate_builddir + "fixmepath"

    sstate_scan_cmd = d.getVar('SSTATE_SCAN_CMD', True)
    sstate_filelist_cmd = "tee %s" % (fixmefn)

    # fixmepath file needs relative paths, drop sstate_builddir prefix
    sstate_filelist_relative_cmd = "sed -i -e 's:^%s::g' %s" % (sstate_builddir, fixmefn)

    xargs_no_empty_run_cmd = '--no-run-if-empty'
    if platform.system() == 'Darwin':
        xargs_no_empty_run_cmd = ''

    # Limit the fixpaths and sed operations based on the initial grep search
    # This has the side effect of making sure the vfs cache is hot
    sstate_hardcode_cmd = "%s | xargs %s | %s | xargs %s %s" % (sstate_scan_cmd, sstate_grep_cmd, sstate_filelist_cmd, xargs_no_empty_run_cmd, sstate_sed_cmd)

    bb.note("Removing hardcoded paths from sstate package: '%s'" % (sstate_hardcode_cmd))
    subprocess.call(sstate_hardcode_cmd, shell=True)

    # If the fixmefn is empty, remove it..
    if os.stat(fixmefn).st_size == 0:
        os.remove(fixmefn)
    else:
        bb.note("Replacing absolute paths in fixmepath file: '%s'" % (sstate_filelist_relative_cmd))
        subprocess.call(sstate_filelist_relative_cmd, shell=True)
}

def sstate_package(ss, d):
    import oe.path

    def make_relative_symlink(path, outputpath, d):
        # Replace absolute TMPDIR paths in symlinks with relative ones
        if not os.path.islink(path):
            return
        link = os.readlink(path)
        if not os.path.isabs(link):
            return
        if not link.startswith(tmpdir):
            return

        depth = outputpath.rpartition(tmpdir)[2].count('/')
        base = link.partition(tmpdir)[2].strip()
        while depth > 1:
            base = "/.." + base
            depth -= 1
        base = "." + base

        bb.debug(2, "Replacing absolute path %s with relative path %s for %s" % (link, base, outputpath))
        os.remove(path)
        os.symlink(base, path)

    tmpdir = d.getVar('TMPDIR', True)

    sstatebuild = d.expand("${WORKDIR}/sstate-build-%s/" % ss['task'])
    sstatepkg = d.getVar('SSTATE_PKG', True) + '_' + ss['task'] + ".tgz"
    bb.utils.remove(sstatebuild, recurse=True)
    bb.utils.mkdirhier(sstatebuild)
    bb.utils.mkdirhier(os.path.dirname(sstatepkg))
    for state in ss['dirs']:
        if not os.path.exists(state[1]):
            continue
        srcbase = state[0].rstrip("/").rsplit('/', 1)[0]
        for walkroot, dirs, files in os.walk(state[1]):
            for file in files:
                srcpath = os.path.join(walkroot, file)
                dstpath = srcpath.replace(state[1], state[2])
                make_relative_symlink(srcpath, dstpath, d)
            for dir in dirs:
                srcpath = os.path.join(walkroot, dir)
                dstpath = srcpath.replace(state[1], state[2])
                make_relative_symlink(srcpath, dstpath, d)
        bb.debug(2, "Preparing tree %s for packaging at %s" % (state[1], sstatebuild + state[0]))
        oe.path.copyhardlinktree(state[1], sstatebuild + state[0])

    workdir = d.getVar('WORKDIR', True)
    for plain in ss['plaindirs']:
        pdir = plain.replace(workdir, sstatebuild)
        bb.utils.mkdirhier(plain)
        bb.utils.mkdirhier(pdir)
        oe.path.copyhardlinktree(plain, pdir)

    d.setVar('SSTATE_BUILDDIR', sstatebuild)
    d.setVar('SSTATE_PKG', sstatepkg)

    for f in (d.getVar('SSTATECREATEFUNCS', True) or '').split() + ['sstate_create_package'] + \
             (d.getVar('SSTATEPOSTCREATEFUNCS', True) or '').split():
        bb.build.exec_func(f, d)

    bb.siggen.dump_this_task(sstatepkg + ".siginfo", d)

    return

def pstaging_fetch(sstatefetch, sstatepkg, d):
    import bb.fetch2

    # Only try to fetch if the user has configured a mirror
    mirrors = d.getVar('SSTATE_MIRRORS', True)
    if not mirrors:
        return

    # Copy the data object and override DL_DIR and SRC_URI
    localdata = bb.data.createCopy(d)
    bb.data.update_data(localdata)

    dldir = localdata.expand("${SSTATE_DIR}")
    bb.utils.mkdirhier(dldir)

    localdata.delVar('MIRRORS')
    localdata.setVar('FILESPATH', dldir)
    localdata.setVar('DL_DIR', dldir)
    localdata.setVar('PREMIRRORS', mirrors)

    # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
    # we'll want to allow network access for the current set of fetches.
    if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
        localdata.delVar('BB_NO_NETWORK')

    # Try a fetch from the sstate mirror; if it fails just return and
    # we will build the package
    uris = ['file://{0}'.format(sstatefetch),
            'file://{0}.siginfo'.format(sstatefetch)]
    if bb.utils.to_boolean(d.getVar("SSTATE_VERIFY_SIG", True), False):
        uris += ['file://{0}.sig'.format(sstatefetch)]

    for srcuri in uris:
        localdata.setVar('SRC_URI', srcuri)
        try:
            fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False)
            fetcher.download()

            # Need to optimise this, if using file:// urls, the fetcher just changes the local path
            # For now work around by symlinking
            localpath = bb.data.expand(fetcher.localpath(srcuri), localdata)
            if localpath != sstatepkg and os.path.exists(localpath) and not os.path.exists(sstatepkg):
                os.symlink(localpath, sstatepkg)

        except bb.fetch2.BBFetchException:
            break

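# pstaging_fetch() only does anything when SSTATE_MIRRORS is configured, e.g.
# in local.conf (the URL below is illustrative):
#   SSTATE_MIRRORS ?= "file://.* http://sstate.example.com/PATH"
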
def sstate_setscene(d):
    shared_state = sstate_state_fromvars(d)
    accelerate = sstate_installpkg(shared_state, d)
    if not accelerate:
        raise bb.build.FuncFailed("No suitable staging package found")

python sstate_task_prefunc () {
    shared_state = sstate_state_fromvars(d)
    sstate_clean(shared_state, d)
}

python sstate_task_postfunc () {
    shared_state = sstate_state_fromvars(d)
    sstate_install(shared_state, d)
    for intercept in shared_state['interceptfuncs']:
        bb.build.exec_func(intercept, d)
    omask = os.umask(002)
    if omask != 002:
        bb.note("Using umask 002 (not %0o) for sstate packaging" % omask)
    sstate_package(shared_state, d)
    os.umask(omask)
}


#
# Shell function to generate an sstate package from a directory
# set as SSTATE_BUILDDIR. Will be run from within SSTATE_BUILDDIR.
#
sstate_create_package () {
    TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
    # Need to handle empty directories
    if [ "$(ls -A)" ]; then
        set +e
        tar -czf $TFILE *
        ret=$?
        if [ $ret -ne 0 ] && [ $ret -ne 1 ]; then
            exit 1
        fi
        set -e
    else
        tar -cz --file=$TFILE --files-from=/dev/null
    fi
    chmod 0664 $TFILE
    mv -f $TFILE ${SSTATE_PKG}

    if [ -n "${SSTATE_SIG_KEY}" ]; then
        rm -f ${SSTATE_PKG}.sig
        echo ${SSTATE_SIG_PASSPHRASE} | gpg --batch --passphrase-fd 0 --detach-sign --local-user ${SSTATE_SIG_KEY} --output ${SSTATE_PKG}.sig ${SSTATE_PKG}
    fi

    cd ${WORKDIR}
    rm -rf ${SSTATE_BUILDDIR}
}
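
# The resulting archive is a plain gzipped tar; if needed it can be inspected
# by hand, e.g. (the object name below is hypothetical):
#   tar -tzf ${SSTATE_DIR}/ab/sstate:zlib:...:ab123456..._populate_sysroot.tgz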

#
# Shell function to decompress and prepare a package for installation.
# Will be run from within SSTATE_INSTDIR.
#
sstate_unpack_package () {
    tar -xmvzf ${SSTATE_PKG}
    # Use "! -w ||" to return true for read only files
    [ ! -w ${SSTATE_PKG} ] || touch --no-dereference ${SSTATE_PKG}
}

BB_HASHCHECK_FUNCTION = "sstate_checkhashes"

def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):

    ret = []
    missed = []
    extension = ".tgz"
    if siginfo:
        extension = extension + ".siginfo"

    def getpathcomponents(task, d):
        # Magic data from BB_HASHFILENAME
        splithashfn = sq_hashfn[task].split(" ")
        spec = splithashfn[1]
        extrapath = splithashfn[0]

        tname = sq_task[task][3:]

        if tname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and splithashfn[2]:
            spec = splithashfn[2]
            extrapath = ""

        return spec, extrapath, tname


    for task in range(len(sq_fn)):

        spec, extrapath, tname = getpathcomponents(task, d)

        sstatefile = d.expand("${SSTATE_DIR}/" + extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)

        if os.path.exists(sstatefile):
            bb.debug(2, "SState: Found valid sstate file %s" % sstatefile)
            ret.append(task)
            continue
        else:
            missed.append(task)
            bb.debug(2, "SState: Looked for but didn't find file %s" % sstatefile)

    mirrors = d.getVar("SSTATE_MIRRORS", True)
    if mirrors:
        # Copy the data object and override DL_DIR and SRC_URI
        localdata = bb.data.createCopy(d)
        bb.data.update_data(localdata)

        dldir = localdata.expand("${SSTATE_DIR}")
        localdata.delVar('MIRRORS')
        localdata.setVar('FILESPATH', dldir)
        localdata.setVar('DL_DIR', dldir)
        localdata.setVar('PREMIRRORS', mirrors)

        bb.debug(2, "SState using premirror of: %s" % mirrors)

        # if BB_NO_NETWORK is set but we also have SSTATE_MIRROR_ALLOW_NETWORK,
        # we'll want to allow network access for the current set of fetches.
        if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
            localdata.delVar('BB_NO_NETWORK')

        from bb.fetch2 import FetchConnectionCache
        def checkstatus_init(thread_worker):
            thread_worker.connection_cache = FetchConnectionCache()

        def checkstatus_end(thread_worker):
            thread_worker.connection_cache.close_connections()

        def checkstatus(thread_worker, arg):
            (task, sstatefile) = arg

            localdata2 = bb.data.createCopy(localdata)
            srcuri = "file://" + sstatefile
            # Set SRC_URI on the per-thread copy so parallel workers don't race
            localdata2.setVar('SRC_URI', srcuri)
            bb.debug(2, "SState: Attempting to fetch %s" % srcuri)

            try:
                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2,
                            connection_cache=thread_worker.connection_cache)
                fetcher.checkstatus()
                bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                ret.append(task)
                if task in missed:
                    missed.remove(task)
            except:
                missed.append(task)
                bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
                pass

        tasklist = []
        for task in range(len(sq_fn)):
            if task in ret:
                continue
            spec, extrapath, tname = getpathcomponents(task, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
            tasklist.append((task, sstatefile))

        if tasklist:
            bb.note("Checking sstate mirror object availability (for %s objects)" % len(tasklist))
            import multiprocessing
            nproc = min(multiprocessing.cpu_count(), len(tasklist))

            pool = oe.utils.ThreadedPool(nproc, len(tasklist),
                    worker_init=checkstatus_init, worker_end=checkstatus_end)
            for t in tasklist:
                pool.add_task(checkstatus, t)
            pool.start()
            pool.wait_completion()

    inheritlist = d.getVar("INHERIT", True)
    if "toaster" in inheritlist:
        evdata = {'missed': [], 'found': []}
        for task in missed:
            spec, extrapath, tname = getpathcomponents(task, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
            evdata['missed'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
        for task in ret:
            spec, extrapath, tname = getpathcomponents(task, d)
            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + ".tgz")
            evdata['found'].append( (sq_fn[task], sq_task[task], sq_hash[task], sstatefile ) )
        bb.event.fire(bb.event.MetadataEvent("MissedSstate", evdata), d)

    if hasattr(bb.parse.siggen, "checkhashes"):
        bb.parse.siggen.checkhashes(missed, ret, sq_fn, sq_task, sq_hash, sq_hashfn, d)

    return ret

BB_SETSCENE_DEPVALID = "setscene_depvalid"

def setscene_depvalid(task, taskdependees, notneeded, d):
    # taskdependees is a dict of tasks which depend on task, each being a 3 item list of [PN, TASKNAME, FILENAME]
    # task is included in taskdependees too

    bb.debug(2, "Considering setscene task: %s" % (str(taskdependees[task])))

    def isNativeCross(x):
        return x.endswith("-native") or "-cross-" in x or "-crosssdk" in x

    def isPostInstDep(x):
        if x in ["qemu-native", "gdk-pixbuf-native", "qemuwrapper-cross", "depmodwrapper-cross", "systemd-systemctl-native", "gtk-icon-utils-native"]:
            return True
        return False

    # We only need to trigger populate_lic through direct dependencies
    if taskdependees[task][1] == "do_populate_lic":
        return True

    for dep in taskdependees:
        bb.debug(2, "  considering dependency: %s" % (str(taskdependees[dep])))
        if task == dep:
            continue
        if dep in notneeded:
            continue
        # do_package_write_* and do_package don't need do_package
        if taskdependees[task][1] == "do_package" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            continue
        # do_package_write_* and do_package don't need do_populate_sysroot, unless it is a postinstall dependency
        if taskdependees[task][1] == "do_populate_sysroot" and taskdependees[dep][1] in ['do_package', 'do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package_qa']:
            if isPostInstDep(taskdependees[task][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm']:
                return False
            continue
        # Native/Cross packages don't exist and are noexec anyway
        if isNativeCross(taskdependees[dep][0]) and taskdependees[dep][1] in ['do_package_write_deb', 'do_package_write_ipk', 'do_package_write_rpm', 'do_packagedata', 'do_package', 'do_package_qa']:
            continue

        # Consider sysroot depending on sysroot tasks
        if taskdependees[task][1] == 'do_populate_sysroot' and taskdependees[dep][1] == 'do_populate_sysroot':
            # base-passwd/shadow-sysroot don't need their dependencies
            if taskdependees[dep][0].endswith(("base-passwd", "shadow-sysroot")):
                continue
            # Nothing needs to depend on libc-initial/gcc-cross-initial
            if "-initial" in taskdependees[task][0]:
                continue
            # Native/Cross populate_sysroot need their dependencies
            if isNativeCross(taskdependees[task][0]) and isNativeCross(taskdependees[dep][0]):
                return False
            # Target populate_sysroot depended on by cross tools need to be installed
            if isNativeCross(taskdependees[dep][0]):
                return False
            # Native/cross tools depended upon by target sysroot are not needed
            if isNativeCross(taskdependees[task][0]):
                continue
            # Target populate_sysroot need their dependencies
            return False

        if taskdependees[task][1] == 'do_shared_workdir':
            continue

        # This is due to the [depends] in useradd.bbclass complicating matters
        # The logic *is* reversed here due to the way hard setscene dependencies are injected
        if taskdependees[task][1] == 'do_package' and taskdependees[dep][0].endswith(('shadow-native', 'shadow-sysroot', 'base-passwd', 'pseudo-native')) and taskdependees[dep][1] == 'do_populate_sysroot':
            continue

        # Safe fallthrough default
        bb.debug(2, "  Default setscene dependency fall through due to dependency: %s" % (str(taskdependees[dep])))
        return False
    return True

addhandler sstate_eventhandler
sstate_eventhandler[eventmask] = "bb.build.TaskSucceeded"
python sstate_eventhandler() {
    d = e.data
    # When we write an sstate package we rewrite the SSTATE_PKG
    spkg = d.getVar('SSTATE_PKG', True)
    if not spkg.endswith(".tgz"):
        taskname = d.getVar("BB_RUNTASK", True)[3:]
        spec = d.getVar('SSTATE_PKGSPEC', True)
        swspec = d.getVar('SSTATE_SWSPEC', True)
        if taskname in ["fetch", "unpack", "patch", "populate_lic", "preconfigure"] and swspec:
            d.setVar("SSTATE_PKGSPEC", "${SSTATE_SWSPEC}")
            d.setVar("SSTATE_EXTRAPATH", "")
        sstatepkg = d.getVar('SSTATE_PKG', True)
        bb.siggen.dump_this_task(sstatepkg + '_' + taskname + ".tgz.siginfo", d)
}


SSTATE_PRUNE_OBSOLETEWORKDIR = "1"
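# Set SSTATE_PRUNE_OBSOLETEWORKDIR to "0" (e.g. in local.conf) to keep the
# workdirs of unreachable recipes; their manifests and stamps are still
# removed by the handler below.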

# Event handler which removes manifests and stamp files for
# recipes which are no longer reachable in a build where they
# once were.
# Also optionally removes the workdir of those tasks/recipes
#
addhandler sstate_eventhandler2
sstate_eventhandler2[eventmask] = "bb.event.ReachableStamps"
python sstate_eventhandler2() {
    import glob
    d = e.data
    stamps = e.stamps.values()
    removeworkdir = (d.getVar("SSTATE_PRUNE_OBSOLETEWORKDIR", False) == "1")
    seen = []
    for a in d.getVar("SSTATE_ARCHS", True).split():
        toremove = []
        i = d.expand("${SSTATE_MANIFESTS}/index-" + a)
        if not os.path.exists(i):
            continue
        with open(i, "r") as f:
            lines = f.readlines()
            for l in lines:
                (stamp, manifest, workdir) = l.split()
                if stamp not in stamps:
                    toremove.append(l)
                    if stamp not in seen:
                        bb.note("Stamp %s is not reachable, removing related manifests" % stamp)
                        seen.append(stamp)
            for r in toremove:
                (stamp, manifest, workdir) = r.split()
                for m in glob.glob(manifest + ".*"):
                    sstate_clean_manifest(m, d)
                bb.utils.remove(stamp + "*")
                if removeworkdir:
                    bb.utils.remove(workdir, recurse = True)
                lines.remove(r)
        with open(i, "w") as f:
            for l in lines:
                f.write(l)
}