#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#

BB_DEFAULT_TASK ?= "build"
CLASSOVERRIDE ?= "class-target"

inherit patch
inherit staging

inherit mirrors
inherit utils
inherit utility-tasks
inherit logging

OE_EXTRA_IMPORTS ?= ""

OE_IMPORTS += "os sys time oe.path oe.utils oe.types oe.package oe.packagegroup oe.sstatesig oe.lsb oe.cachedpath oe.license oe.qa oe.reproducible oe.rust oe.buildcfg ${OE_EXTRA_IMPORTS}"
OE_IMPORTS[type] = "list"
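# A layer can expose additional Python modules to the metadata via
# OE_EXTRA_IMPORTS, e.g. in a conf file (hypothetical module name, for
# illustration):
#
#   OE_EXTRA_IMPORTS:append = " oe.mylayerutils"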

PACKAGECONFIG_CONFARGS ??= ""

def oe_import(d):
    import sys

    bbpath = [os.path.join(dir, "lib") for dir in d.getVar("BBPATH").split(":")]
    sys.path[0:0] = [dir for dir in bbpath if dir not in sys.path]

    import oe.data
    for toimport in oe.data.typed_value("OE_IMPORTS", d):
        try:
            # Make a python object accessible from the metadata
            bb.utils._context[toimport.split(".", 1)[0]] = __import__(toimport)
        except AttributeError as e:
            bb.error("Error importing OE modules: %s" % str(e))
    return ""

# We need the oe module namespace early (before INHERITs get added)
OE_IMPORTED := "${@oe_import(d)}"

inherit metadata_scm

def lsb_distro_identifier(d):
    adjust = d.getVar('LSB_DISTRO_ADJUST')
    adjust_func = None
    if adjust:
        try:
            adjust_func = globals()[adjust]
        except KeyError:
            pass
    return oe.lsb.distro_identifier(adjust_func)

die() {
    bbfatal_log "$*"
}

oe_runmake_call() {
    bbnote ${MAKE} ${EXTRA_OEMAKE} "$@"
    ${MAKE} ${EXTRA_OEMAKE} "$@"
}

oe_runmake() {
    oe_runmake_call "$@" || die "oe_runmake failed"
}
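# A typical invocation from a task might be (illustrative):
#   oe_runmake -C ${S} DESTDIR=${D} install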


def get_base_dep(d):
    if d.getVar('INHIBIT_DEFAULT_DEPS', False):
        return ""
    return "${BASE_DEFAULT_DEPS}"

BASE_DEFAULT_DEPS = "virtual/${HOST_PREFIX}gcc virtual/${HOST_PREFIX}compilerlibs virtual/libc"

BASEDEPENDS = ""
BASEDEPENDS:class-target = "${@get_base_dep(d)}"
BASEDEPENDS:class-nativesdk = "${@get_base_dep(d)}"

DEPENDS:prepend = "${BASEDEPENDS} "

FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}"
# THISDIR only works properly with immediate expansion as it has to run
# in the context of the location it's used (:=)
THISDIR = "${@os.path.dirname(d.getVar('FILE'))}"
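# THISDIR is typically referenced with immediate expansion for the same
# reason, e.g. in a bbappend (illustrative):
#   FILESEXTRAPATHS:prepend := "${THISDIR}/files:"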

def extra_path_elements(d):
    path = ""
    elements = (d.getVar('EXTRANATIVEPATH') or "").split()
    for e in elements:
        path = path + "${STAGING_BINDIR_NATIVE}/" + e + ":"
    return path

PATH:prepend = "${@extra_path_elements(d)}"
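# Recipes can prepend extra native tool directories from the sysroot via
# EXTRANATIVEPATH, e.g. (illustrative):
#   EXTRANATIVEPATH += "chrpath-native"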

def get_lic_checksum_file_list(d):
    filelist = []
    lic_files = d.getVar("LIC_FILES_CHKSUM") or ''
    tmpdir = d.getVar("TMPDIR")
    s = d.getVar("S")
    b = d.getVar("B")
    workdir = d.getVar("WORKDIR")

    urls = lic_files.split()
    for url in urls:
        # We only care about items that are absolute paths since
        # any others should be covered by SRC_URI.
        try:
            (method, host, path, user, pswd, parm) = bb.fetch.decodeurl(url)
            if method != "file" or not path:
                raise bb.fetch.MalformedUrl(url)

            if path[0] == '/':
                if path.startswith((tmpdir, s, b, workdir)):
                    continue
                filelist.append(path + ":" + str(os.path.exists(path)))
        except bb.fetch.MalformedUrl:
            bb.fatal(d.getVar('PN') + ": LIC_FILES_CHKSUM contains an invalid URL: " + url)
    return " ".join(filelist)
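# For reference, LIC_FILES_CHKSUM entries are file:// URLs, e.g. (checksum
# value illustrative):
#   LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504"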

def setup_hosttools_dir(dest, toolsvar, d, fatal=True):
    tools = d.getVar(toolsvar).split()
    origbbenv = d.getVar("BB_ORIGENV", False)
    path = origbbenv.getVar("PATH")
    # Need to ignore our own scripts directories to avoid circular links
    for p in path.split(":"):
        if p.endswith("/scripts"):
            path = path.replace(p, "/ignoreme")
    bb.utils.mkdirhier(dest)
    notfound = []
    for tool in tools:
        desttool = os.path.join(dest, tool)
        if not os.path.exists(desttool):
            # clean up dead symlink
            if os.path.islink(desttool):
                os.unlink(desttool)
            srctool = bb.utils.which(path, tool, executable=True)
            # gcc/g++ may link to ccache on some hosts, e.g.
            # /usr/local/bin/ccache/gcc -> /usr/bin/ccache; which(gcc) would
            # then return /usr/local/bin/ccache/gcc, but what we need is
            # /usr/bin/gcc, so check for that and fix it up here.
            if "ccache" in srctool:
                srctool = bb.utils.which(path, tool, executable=True, direction=1)
            if srctool:
                os.symlink(srctool, desttool)
            else:
                notfound.append(tool)

    if notfound and fatal:
        bb.fatal("The following required tools (as specified by HOSTTOOLS) appear to be unavailable in PATH, please install them in order to proceed:\n %s" % " ".join(notfound))

addtask fetch
do_fetch[dirs] = "${DL_DIR}"
do_fetch[file-checksums] = "${@bb.fetch.get_checksum_file_list(d)}"
do_fetch[file-checksums] += " ${@get_lic_checksum_file_list(d)}"
do_fetch[vardeps] += "SRCREV"
do_fetch[network] = "1"
python base_do_fetch() {

    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.download()
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}

addtask unpack after do_fetch
do_unpack[dirs] = "${WORKDIR}"

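# Unpack into a clean ${S}; when S == WORKDIR only the patches directory is
# cleaned, so the rest of WORKDIR is preserved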
do_unpack[cleandirs] = "${@d.getVar('S') if os.path.normpath(d.getVar('S')) != os.path.normpath(d.getVar('WORKDIR')) else os.path.join('${S}', 'patches')}"

python base_do_unpack() {
    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.unpack(d.getVar('WORKDIR'))
    except bb.fetch2.BBFetchException as e:
        bb.fatal("Bitbake Fetcher Error: " + repr(e))
}

SSTATETASKS += "do_deploy_source_date_epoch"

do_deploy_source_date_epoch () {
    mkdir -p ${SDE_DEPLOYDIR}
    if [ -e ${SDE_FILE} ]; then
        echo "Deploying SDE from ${SDE_FILE} -> ${SDE_DEPLOYDIR}."
        cp -p ${SDE_FILE} ${SDE_DEPLOYDIR}/__source_date_epoch.txt
    else
        echo "${SDE_FILE} not found!"
    fi
}

python do_deploy_source_date_epoch_setscene () {
    sstate_setscene(d)
    bb.utils.mkdirhier(d.getVar('SDE_DIR'))
    sde_file = os.path.join(d.getVar('SDE_DEPLOYDIR'), '__source_date_epoch.txt')
    if os.path.exists(sde_file):
        target = d.getVar('SDE_FILE')
        bb.debug(1, "Moving setscene SDE file %s -> %s" % (sde_file, target))
        bb.utils.rename(sde_file, target)
    else:
        bb.debug(1, "%s not found!" % sde_file)
}

do_deploy_source_date_epoch[dirs] = "${SDE_DEPLOYDIR}"
do_deploy_source_date_epoch[sstate-plaindirs] = "${SDE_DEPLOYDIR}"
addtask do_deploy_source_date_epoch_setscene
addtask do_deploy_source_date_epoch before do_configure after do_patch

python create_source_date_epoch_stamp() {
    # Version: 1
    source_date_epoch = oe.reproducible.get_source_date_epoch(d, d.getVar('S'))
    oe.reproducible.epochfile_write(source_date_epoch, d.getVar('SDE_FILE'), d)
}
do_unpack[postfuncs] += "create_source_date_epoch_stamp"

def get_source_date_epoch_value(d):
    return oe.reproducible.epochfile_read(d.getVar('SDE_FILE'), d)

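# get_layers_branch_rev() builds the "<layer> = "<branch>:<rev>"" lines shown
# in the build banner, blanking the value of all but the last entry in a run
# of consecutive duplicates to keep the output compact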
def get_layers_branch_rev(d):
    revisions = oe.buildcfg.get_layer_revisions(d)
    layers_branch_rev = ["%-20s = \"%s:%s\"" % (r[1], r[2], r[3]) for r in revisions]
    i = len(layers_branch_rev) - 1
    p1 = layers_branch_rev[i].find("=")
    s1 = layers_branch_rev[i][p1:]
    while i > 0:
        p2 = layers_branch_rev[i-1].find("=")
        s2 = layers_branch_rev[i-1][p2:]
        if s1 == s2:
            layers_branch_rev[i-1] = layers_branch_rev[i-1][0:p2]
            i -= 1
        else:
            i -= 1
            p1 = layers_branch_rev[i].find("=")
            s1 = layers_branch_rev[i][p1:]
    return layers_branch_rev


BUILDCFG_FUNCS ??= "buildcfg_vars get_layers_branch_rev buildcfg_neededvars"
BUILDCFG_FUNCS[type] = "list"

def buildcfg_vars(d):
    statusvars = oe.data.typed_value('BUILDCFG_VARS', d)
    for var in statusvars:
        value = d.getVar(var)
        if value is not None:
            yield '%-20s = "%s"' % (var, value)

def buildcfg_neededvars(d):
    needed_vars = oe.data.typed_value("BUILDCFG_NEEDEDVARS", d)
    pesteruser = []
    for v in needed_vars:
        val = d.getVar(v)
        if not val or val == 'INVALID':
            pesteruser.append(v)

    if pesteruser:
        bb.fatal('The following variable(s) were not set: %s\nPlease set them directly, or choose a MACHINE or DISTRO that sets them.' % ', '.join(pesteruser))

addhandler base_eventhandler
base_eventhandler[eventmask] = "bb.event.ConfigParsed bb.event.MultiConfigParsed bb.event.BuildStarted bb.event.RecipePreFinalise bb.event.RecipeParsed"
python base_eventhandler() {
    import bb.runqueue

    if isinstance(e, bb.event.ConfigParsed):
        if not d.getVar("NATIVELSBSTRING", False):
            d.setVar("NATIVELSBSTRING", lsb_distro_identifier(d))
        d.setVar("ORIGNATIVELSBSTRING", d.getVar("NATIVELSBSTRING", False))
        d.setVar('BB_VERSION', bb.__version__)

    # There might be no bb.event.ConfigParsed event if the bitbake server is
    # already running, so check bb.event.BuildStarted too to make sure
    # ${HOSTTOOLS_DIR} exists.
    if isinstance(e, bb.event.ConfigParsed) or \
            (isinstance(e, bb.event.BuildStarted) and not os.path.exists(d.getVar('HOSTTOOLS_DIR'))):
        # Works with the line in layer.conf which changes PATH to point here
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS', d)
        setup_hosttools_dir(d.getVar('HOSTTOOLS_DIR'), 'HOSTTOOLS_NONFATAL', d, fatal=False)

    if isinstance(e, bb.event.MultiConfigParsed):
        # We need to expand SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS in each of the multiconfig data stores'
        # own contexts so the variables get expanded correctly for that arch, then inject back into
        # the main data store.
        deps = []
        for config in e.mcdata:
            deps.append(e.mcdata[config].getVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS"))
        deps = " ".join(deps)
        e.mcdata[''].setVar("SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS", deps)

    if isinstance(e, bb.event.BuildStarted):
        localdata = bb.data.createCopy(d)
        statuslines = []
        for func in oe.data.typed_value('BUILDCFG_FUNCS', localdata):
            g = globals()
            if func not in g:
                bb.warn("Build configuration function '%s' does not exist" % func)
            else:
                flines = g[func](localdata)
                if flines:
                    statuslines.extend(flines)

        statusheader = d.getVar('BUILDCFG_HEADER')
        if statusheader:
            bb.plain('\n%s\n%s\n' % (statusheader, '\n'.join(statuslines)))

    # This code is to silence warnings where the SDK variables overwrite the
    # target ones and we'd see duplicate key names overwriting each other
    # for various PREFERRED_PROVIDERS
    if isinstance(e, bb.event.RecipePreFinalise):
        if d.getVar("TARGET_PREFIX") == d.getVar("SDK_PREFIX"):
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}binutils")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}gcc")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}g++")
            d.delVar("PREFERRED_PROVIDER_virtual/${TARGET_PREFIX}compilerlibs")

    if isinstance(e, bb.event.RecipeParsed):
        #
        # If we have multiple providers of virtual/X and a PREFERRED_PROVIDER_virtual/X is set,
        # skip parsing for all the other providers, which means they get uninstalled from the
        # sysroot since they're now "unreachable". This makes switching virtual/kernel work in
        # particular.
        #
        pn = d.getVar('PN')
        source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False)
        if not source_mirror_fetch:
            provs = (d.getVar("PROVIDES") or "").split()
            multiprovidersallowed = (d.getVar("BB_MULTI_PROVIDER_ALLOWED") or "").split()
            for p in provs:
                if p.startswith("virtual/") and p not in multiprovidersallowed:
                    profprov = d.getVar("PREFERRED_PROVIDER_" + p)
                    if profprov and pn != profprov:
                        raise bb.parse.SkipRecipe("PREFERRED_PROVIDER_%s set to %s, not %s" % (p, profprov, pn))
}

CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"
CLEANBROKEN = "0"

addtask configure after do_patch
do_configure[dirs] = "${B}"
base_do_configure() {
    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
            cd ${B}
            if [ "${CLEANBROKEN}" != "1" -a \( -e Makefile -o -e makefile -o -e GNUmakefile \) ]; then
                oe_runmake clean
            fi
            # -ignore_readdir_race does not work correctly with -delete;
            # use xargs to avoid spurious build failures
            find ${B} -ignore_readdir_race -name \*.la -type f -print0 | xargs -0 rm -f
        fi
    fi
    if [ -n "${CONFIGURESTAMPFILE}" ]; then
        mkdir -p `dirname ${CONFIGURESTAMPFILE}`
        echo ${BB_TASKHASH} > ${CONFIGURESTAMPFILE}
    fi
}

addtask compile after do_configure
do_compile[dirs] = "${B}"
base_do_compile() {
    if [ -e Makefile -o -e makefile -o -e GNUmakefile ]; then
        oe_runmake || die "make failed"
    else
        bbnote "nothing to compile"
    fi
}

addtask install after do_compile
do_install[dirs] = "${B}"
# Remove and re-create ${D} so that it is guaranteed to be empty
do_install[cleandirs] = "${D}"

base_do_install() {
    :
}

base_do_package() {
    :
}

addtask build after do_populate_sysroot
do_build[noexec] = "1"
do_build[recrdeptask] += "do_deploy"
do_build () {
    :
}

def set_packagetriplet(d):
    archs = []
    tos = []
    tvs = []

    archs.append(d.getVar("PACKAGE_ARCHS").split())
    tos.append(d.getVar("TARGET_OS"))
    tvs.append(d.getVar("TARGET_VENDOR"))

    def settriplet(d, varname, archs, tos, tvs):
        triplets = []
        for i in range(len(archs)):
            for arch in archs[i]:
                triplets.append(arch + tvs[i] + "-" + tos[i])
        triplets.reverse()
        d.setVar(varname, " ".join(triplets))

    settriplet(d, "PKGTRIPLETS", archs, tos, tvs)

    variants = d.getVar("MULTILIB_VARIANTS") or ""
    for item in variants.split():
        localdata = bb.data.createCopy(d)
        overrides = localdata.getVar("OVERRIDES", False) + ":virtclass-multilib-" + item
        localdata.setVar("OVERRIDES", overrides)

        archs.append(localdata.getVar("PACKAGE_ARCHS").split())
        tos.append(localdata.getVar("TARGET_OS"))
        tvs.append(localdata.getVar("TARGET_VENDOR"))

    settriplet(d, "PKGMLTRIPLETS", archs, tos, tvs)

python () {
    import string, re

    # Handle backfilling
    oe.utils.features_backfill("DISTRO_FEATURES", d)
    oe.utils.features_backfill("MACHINE_FEATURES", d)

    if d.getVar("S")[-1] == '/':
        bb.warn("Recipe %s sets S variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("S")))
    if d.getVar("B")[-1] == '/':
        bb.warn("Recipe %s sets B variable with trailing slash '%s', remove it" % (d.getVar("PN"), d.getVar("B")))

    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("S")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${S}")
    if os.path.normpath(d.getVar("WORKDIR")) != os.path.normpath(d.getVar("B")):
        d.appendVar("PSEUDO_IGNORE_PATHS", ",${B}")

    # To add a recipe to the skip list, set:
    #   SKIP_RECIPE[pn] = "message"
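    # e.g. (hypothetical recipe name):
    #   SKIP_RECIPE[my-recipe] = "not needed for this DISTRO"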
    pn = d.getVar('PN')
    skip_msg = d.getVarFlag('SKIP_RECIPE', pn)
    if skip_msg:
        bb.debug(1, "Skipping %s %s" % (pn, skip_msg))
        raise bb.parse.SkipRecipe("Recipe will be skipped because: %s" % (skip_msg))

    # Handle PACKAGECONFIG
    #
    # These take the form:
    #
    #   PACKAGECONFIG ??= "<default options>"
    #   PACKAGECONFIG[foo] = "--enable-foo,--disable-foo,foo_depends,foo_runtime_depends,foo_runtime_recommends,foo_conflict_packageconfig"
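    #
    # For example, a recipe might set (hypothetical feature name):
    #
    #   PACKAGECONFIG ??= "gnutls"
    #   PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls"
    #
    # which, with "gnutls" enabled, appends --with-gnutls to
    # PACKAGECONFIG_CONFARGS and gnutls to DEPENDS.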
    pkgconfigflags = d.getVarFlags("PACKAGECONFIG") or {}
    if pkgconfigflags:
        pkgconfig = (d.getVar('PACKAGECONFIG') or "").split()
        pn = d.getVar("PN")

        mlprefix = d.getVar("MLPREFIX")

        def expandFilter(appends, extension, prefix):
            appends = bb.utils.explode_deps(d.expand(" ".join(appends)))
            newappends = []
            for a in appends:
                if a.endswith("-native") or ("-cross-" in a):
                    newappends.append(a)
                elif a.startswith("virtual/"):
                    subs = a.split("/", 1)[1]
                    if subs.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append("virtual/" + prefix + subs + extension)
                else:
                    if a.startswith(prefix):
                        newappends.append(a + extension)
                    else:
                        newappends.append(prefix + a + extension)
            return newappends

        def appendVar(varname, appends):
            if not appends:
                return
            if varname.find("DEPENDS") != -1:
                if bb.data.inherits_class('nativesdk', d) or bb.data.inherits_class('cross-canadian', d):
                    appends = expandFilter(appends, "", "nativesdk-")
                elif bb.data.inherits_class('native', d):
                    appends = expandFilter(appends, "-native", "")
                elif mlprefix:
                    appends = expandFilter(appends, "", mlprefix)
            varname = d.expand(varname)
            d.appendVar(varname, " " + " ".join(appends))

        extradeps = []
        extrardeps = []
        extrarrecs = []
        extraconf = []
        for flag, flagval in sorted(pkgconfigflags.items()):
            items = flagval.split(",")
            num = len(items)
            if num > 6:
                bb.error("%s: PACKAGECONFIG[%s] Only enable,disable,depend,rdepend,rrecommend,conflict_packageconfig can be specified!"
                    % (d.getVar('PN'), flag))

            if flag in pkgconfig:
                if num >= 3 and items[2]:
                    extradeps.append(items[2])
                if num >= 4 and items[3]:
                    extrardeps.append(items[3])
                if num >= 5 and items[4]:
                    extrarrecs.append(items[4])
                if num >= 1 and items[0]:
                    extraconf.append(items[0])
            elif num >= 2 and items[1]:
                extraconf.append(items[1])

            if num >= 6 and items[5]:
                conflicts = set(items[5].split())
                invalid = conflicts.difference(set(pkgconfigflags.keys()))
                if invalid:
                    bb.error("%s: PACKAGECONFIG[%s] Invalid conflict package config%s '%s' specified."
                        % (d.getVar('PN'), flag, 's' if len(invalid) > 1 else '', ' '.join(invalid)))

                if flag in pkgconfig:
                    intersec = conflicts.intersection(set(pkgconfig))
                    if intersec:
                        bb.fatal("%s: PACKAGECONFIG[%s] Conflict package config%s '%s' set in PACKAGECONFIG."
                            % (d.getVar('PN'), flag, 's' if len(intersec) > 1 else '', ' '.join(intersec)))

        appendVar('DEPENDS', extradeps)
        appendVar('RDEPENDS:${PN}', extrardeps)
        appendVar('RRECOMMENDS:${PN}', extrarrecs)
        appendVar('PACKAGECONFIG_CONFARGS', extraconf)

    pn = d.getVar('PN')
    license = d.getVar('LICENSE')
    if license == "INVALID" and pn != "defaultpkgname":
        bb.fatal('This recipe does not have the LICENSE field set (%s)' % pn)

    if bb.data.inherits_class('license', d):
        check_license_format(d)
        unmatched_license_flags = check_license_flags(d)
        if unmatched_license_flags:
            if len(unmatched_license_flags) == 1:
                message = "because it has a restricted license '{0}', which is not listed in LICENSE_FLAGS_ACCEPTED".format(unmatched_license_flags[0])
            else:
                message = "because it has restricted licenses {0}, which are not listed in LICENSE_FLAGS_ACCEPTED".format(
                    ", ".join("'{0}'".format(f) for f in unmatched_license_flags))
            bb.debug(1, "Skipping %s %s" % (pn, message))
            raise bb.parse.SkipRecipe(message)

    # If we're building a target package we need to use fakeroot (pseudo)
    # in order to capture permissions, owners, groups and special files
    if not bb.data.inherits_class('native', d) and not bb.data.inherits_class('cross', d):
        d.appendVarFlag('do_prepare_recipe_sysroot', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.appendVarFlag('do_install', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_install', 'fakeroot', '1')
        d.appendVarFlag('do_package', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_package', 'fakeroot', '1')
        d.setVarFlag('do_package_setscene', 'fakeroot', '1')
        d.appendVarFlag('do_package_setscene', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')
        d.setVarFlag('do_devshell', 'fakeroot', '1')
        d.appendVarFlag('do_devshell', 'depends', ' virtual/fakeroot-native:do_populate_sysroot')

    need_machine = d.getVar('COMPATIBLE_MACHINE')
    if need_machine and not d.getVar('PARSE_ALL_RECIPES', False):
        import re
        compat_machines = (d.getVar('MACHINEOVERRIDES') or "").split(":")
        for m in compat_machines:
            if re.match(need_machine, m):
                break
        else:
            raise bb.parse.SkipRecipe("incompatible with machine %s (not in COMPATIBLE_MACHINE)" % d.getVar('MACHINE'))

    source_mirror_fetch = d.getVar('SOURCE_MIRROR_FETCH', False) or d.getVar('PARSE_ALL_RECIPES', False)
    if not source_mirror_fetch:
        need_host = d.getVar('COMPATIBLE_HOST')
        if need_host:
            import re
            this_host = d.getVar('HOST_SYS')
            if not re.match(need_host, this_host):
                raise bb.parse.SkipRecipe("incompatible with host %s (not in COMPATIBLE_HOST)" % this_host)

        bad_licenses = (d.getVar('INCOMPATIBLE_LICENSE') or "").split()

        check_license = False if pn.startswith("nativesdk-") else True
        for t in ["-native", "-cross-${TARGET_ARCH}", "-cross-initial-${TARGET_ARCH}",
                  "-crosssdk-${SDK_SYS}", "-crosssdk-initial-${SDK_SYS}",
                  "-cross-canadian-${TRANSLATED_TARGET_ARCH}"]:
            if pn.endswith(d.expand(t)):
                check_license = False
        if pn.startswith("gcc-source-"):
            check_license = False

        if check_license and bad_licenses:
            bad_licenses = expand_wildcard_licenses(d, bad_licenses)

            exceptions = (d.getVar("INCOMPATIBLE_LICENSE_EXCEPTIONS") or "").split()

            for lic_exception in exceptions:
                if ":" in lic_exception:
                    lic_exception = lic_exception.split(":")[1]
                if lic_exception in oe.license.obsolete_license_list():
                    bb.fatal("Obsolete license %s used in INCOMPATIBLE_LICENSE_EXCEPTIONS" % lic_exception)

            pkgs = d.getVar('PACKAGES').split()
            skipped_pkgs = {}
            unskipped_pkgs = []
            for pkg in pkgs:
                remaining_bad_licenses = oe.license.apply_pkg_license_exception(pkg, bad_licenses, exceptions)

                incompatible_lic = incompatible_license(d, remaining_bad_licenses, pkg)
                if incompatible_lic:
                    skipped_pkgs[pkg] = incompatible_lic
                else:
                    unskipped_pkgs.append(pkg)

            if unskipped_pkgs:
                for pkg in skipped_pkgs:
                    bb.debug(1, "Skipping the package %s at do_rootfs because of incompatible license(s): %s" % (pkg, ' '.join(skipped_pkgs[pkg])))
                    d.setVar('_exclude_incompatible-' + pkg, ' '.join(skipped_pkgs[pkg]))
                for pkg in unskipped_pkgs:
                    bb.debug(1, "Including the package %s" % pkg)
            else:
                incompatible_lic = incompatible_license(d, bad_licenses)
                for pkg in skipped_pkgs:
                    incompatible_lic += skipped_pkgs[pkg]
                incompatible_lic = sorted(list(set(incompatible_lic)))

                if incompatible_lic:
                    bb.debug(1, "Skipping recipe %s because of incompatible license(s): %s" % (pn, ' '.join(incompatible_lic)))
                    raise bb.parse.SkipRecipe("it has incompatible license(s): %s" % ' '.join(incompatible_lic))

    needsrcrev = False
    srcuri = d.getVar('SRC_URI')
    for uri_string in srcuri.split():
        uri = bb.fetch.URI(uri_string)
        # Also check downloadfilename as the URL path might not be useful for sniffing
        path = uri.params.get("downloadfilename", uri.path)

        # HTTP/FTP use the wget fetcher
        if uri.scheme in ("http", "https", "ftp"):
            d.appendVarFlag('do_fetch', 'depends', ' wget-native:do_populate_sysroot')

        # Svn packages should DEPEND on subversion-native
        if uri.scheme == "svn":
            needsrcrev = True
            d.appendVarFlag('do_fetch', 'depends', ' subversion-native:do_populate_sysroot')

        # Git packages should DEPEND on git-native
        elif uri.scheme in ("git", "gitsm"):
            needsrcrev = True
            d.appendVarFlag('do_fetch', 'depends', ' git-native:do_populate_sysroot')

        # Mercurial packages should DEPEND on mercurial-native
        elif uri.scheme == "hg":
            needsrcrev = True
            d.appendVar("EXTRANATIVEPATH", ' python3-native ')
            d.appendVarFlag('do_fetch', 'depends', ' mercurial-native:do_populate_sysroot')

        # Perforce packages support SRCREV = "${AUTOREV}"
        elif uri.scheme == "p4":
            needsrcrev = True

        # OSC packages should DEPEND on osc-native
        elif uri.scheme == "osc":
            d.appendVarFlag('do_fetch', 'depends', ' osc-native:do_populate_sysroot')

        elif uri.scheme == "npm":
            d.appendVarFlag('do_fetch', 'depends', ' nodejs-native:do_populate_sysroot')

        elif uri.scheme == "repo":
            needsrcrev = True
            d.appendVarFlag('do_fetch', 'depends', ' repo-native:do_populate_sysroot')

        # *.lz4 should DEPEND on lz4-native for unpacking
        if path.endswith('.lz4'):
            d.appendVarFlag('do_unpack', 'depends', ' lz4-native:do_populate_sysroot')

        # *.zst should DEPEND on zstd-native for unpacking
        elif path.endswith('.zst'):
            d.appendVarFlag('do_unpack', 'depends', ' zstd-native:do_populate_sysroot')

        # *.lz should DEPEND on lzip-native for unpacking
        elif path.endswith('.lz'):
            d.appendVarFlag('do_unpack', 'depends', ' lzip-native:do_populate_sysroot')

        # *.xz should DEPEND on xz-native for unpacking
        elif path.endswith('.xz') or path.endswith('.txz'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.zip should DEPEND on unzip-native for unpacking
        elif path.endswith('.zip') or path.endswith('.jar'):
            d.appendVarFlag('do_unpack', 'depends', ' unzip-native:do_populate_sysroot')

        # Some rpm files may be compressed internally using xz (for example, rpms from Fedora)
        elif path.endswith('.rpm'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

        # *.deb should DEPEND on xz-native for unpacking
        elif path.endswith('.deb'):
            d.appendVarFlag('do_unpack', 'depends', ' xz-native:do_populate_sysroot')

    if needsrcrev:
        d.setVar("SRCPV", "${@bb.fetch2.get_srcrev(d)}")
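        # SRCPV is commonly used by recipes in PV, e.g. (illustrative):
        #   PV = "1.0+git${SRCPV}"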

        # Gather all named SRCREVs to add to the sstate hash calculation
        # This anonymous python snippet is called multiple times so we
        # need to be careful to not double up the appends here and cause
        # the base hash to mismatch the task hash
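        # e.g. (illustrative) a SRC_URI entry such as
        #   git://example.com/repo.git;branch=main;name=meta
        # contributes "SRCREV_meta" to the do_fetch vardeps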
        for uri in srcuri.split():
            parm = bb.fetch.decodeurl(uri)[5]
            uri_names = parm.get("name", "").split(",")
            for uri_name in filter(None, uri_names):
                srcrev_name = "SRCREV_{}".format(uri_name)
                if srcrev_name not in (d.getVarFlag("do_fetch", "vardeps") or "").split():
                    d.appendVarFlag("do_fetch", "vardeps", " {}".format(srcrev_name))

    set_packagetriplet(d)

    # 'multimachine' handling
    mach_arch = d.getVar('MACHINE_ARCH')
    pkg_arch = d.getVar('PACKAGE_ARCH')

    if (pkg_arch == mach_arch):
        # Already machine specific - nothing further to do
        return

    #
    # We always try to scan SRC_URI for URLs with machine overrides
    # unless the package sets SRC_URI_OVERRIDES_PACKAGE_ARCH=0
    #
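    # e.g. a file://defconfig entry resolved from a files/${MACHINE}/
    # subdirectory (illustrative layout) causes the scan below to force
    # PACKAGE_ARCH to MACHINE_ARCH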
    override = d.getVar('SRC_URI_OVERRIDES_PACKAGE_ARCH')
    if override != '0':
        paths = []
        fpaths = (d.getVar('FILESPATH') or '').split(':')
        machine = d.getVar('MACHINE')
        for p in fpaths:
            if os.path.basename(p) == machine and os.path.isdir(p):
                paths.append(p)

        if paths:
            for s in srcuri.split():
                if not s.startswith("file://"):
                    continue
                fetcher = bb.fetch2.Fetch([s], d)
                local = fetcher.localpath(s)
                for mp in paths:
                    if local.startswith(mp):
                        #bb.note("overriding PACKAGE_ARCH from %s to %s for %s" % (pkg_arch, mach_arch, pn))
                        d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
                        return

    packages = d.getVar('PACKAGES').split()
    for pkg in packages:
        pkgarch = d.getVar("PACKAGE_ARCH_%s" % pkg)

        # We could look for != PACKAGE_ARCH here but how to choose
        # if multiple differences are present?
        # Look through PACKAGE_ARCHS for the priority order?
        if pkgarch and pkgarch == mach_arch:
            d.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}")
            bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN"))
}

addtask cleansstate after do_clean
python do_cleansstate() {
    sstate_clean_cachefiles(d)
}
addtask cleanall after do_cleansstate
do_cleansstate[nostamp] = "1"

python do_cleanall() {
    src_uri = (d.getVar('SRC_URI') or "").split()
    if not src_uri:
        return

    try:
        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.clean()
    except bb.fetch2.BBFetchException as e:
        bb.fatal(str(e))
}
do_cleanall[nostamp] = "1"


EXPORT_FUNCTIONS do_fetch do_unpack do_configure do_compile do_install do_package