#
# SPDX-License-Identifier: GPL-2.0-only
#

from abc import ABCMeta, abstractmethod
import os
import glob
import subprocess
import shutil
import re
import collections
import bb
import tempfile
import oe.utils
import oe.path
import string
from oe.gpg_sign import get_signer
import hashlib
import fnmatch

# this can be used by all PM backends to create the index files in parallel
def create_index(arg):
    index_cmd = arg

    bb.note("Executing '%s' ..." % index_cmd)
    result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
    if result:
        bb.note(result)
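
# The index commands themselves are built by the per-backend indexers; they
# typically feed a list of shell commands through this helper in parallel,
# roughly like (illustrative sketch, not lifted from this module):
#
#   oe.utils.multiprocess_launch(create_index, index_cmds, d)
#
# where index_cmds is the list of index-creation commands to execute.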

def opkg_query(cmd_output):
    """
    This method parses the output from the package manager and returns
    a dictionary with information about the packages. This is used
    when the packages are in deb or ipk format.
    """
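    # Illustrative input stanza (opkg/dpkg "status"-style output) and the entry
    # it produces; the values are made up for the example:
    #
    #   Package: libc6
    #   Version: 2.31-r0
    #   Architecture: core2-64
    #   Depends: libgcc1 (>= 9.3.0)
    #
    #   -> {"libc6": {"arch": "core2-64", "ver": "2.31-r0",
    #                 "filename": "libc6_2.31-r0_core2-64.ipk",
    #                 "deps": ["libgcc1"], "pkgarch": "", "provs": []}}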
    verregex = re.compile(r' \([=<>]* [^ )]*\)')
    output = dict()
    pkg = ""
    arch = ""
    ver = ""
    filename = ""
    dep = []
    prov = []
    pkgarch = ""
    for line in cmd_output.splitlines()+['']:
        line = line.rstrip()
        if ':' in line:
            if line.startswith("Package: "):
                pkg = line.split(": ")[1]
            elif line.startswith("Architecture: "):
                arch = line.split(": ")[1]
            elif line.startswith("Version: "):
                ver = line.split(": ")[1]
            elif line.startswith("File: ") or line.startswith("Filename:"):
                filename = line.split(": ")[1]
                if "/" in filename:
                    filename = os.path.basename(filename)
            elif line.startswith("Depends: "):
                depends = verregex.sub('', line.split(": ")[1])
                for depend in depends.split(", "):
                    dep.append(depend)
            elif line.startswith("Recommends: "):
                recommends = verregex.sub('', line.split(": ")[1])
                for recommend in recommends.split(", "):
                    dep.append("%s [REC]" % recommend)
            elif line.startswith("PackageArch: "):
                pkgarch = line.split(": ")[1]
            elif line.startswith("Provides: "):
                provides = verregex.sub('', line.split(": ")[1])
                for provide in provides.split(", "):
                    prov.append(provide)

        # When there is a blank line save the package information
        elif not line:
            # IPK doesn't include the filename
            if not filename:
                filename = "%s_%s_%s.ipk" % (pkg, ver, arch)
            if pkg:
                output[pkg] = {"arch":arch, "ver":ver,
                        "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov}
            pkg = ""
            arch = ""
            ver = ""
            filename = ""
            dep = []
            prov = []
            pkgarch = ""

    return output

def failed_postinsts_abort(pkgs, log_path):
    bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot,
then please place them into pkg_postinst_ontarget_${PN} ().
Deferring to first boot via 'exit 1' is no longer supported.
Details of the failure are in %s.""" %(pkgs, log_path))

def generate_locale_archive(d, rootfs, target_arch, localedir):
    # Pretty sure we don't need this for locale archive generation but
    # keeping it to be safe...
    locale_arch_options = { \
        "arc": ["--uint32-align=4", "--little-endian"],
        "arceb": ["--uint32-align=4", "--big-endian"],
        "arm": ["--uint32-align=4", "--little-endian"],
        "armeb": ["--uint32-align=4", "--big-endian"],
        "aarch64": ["--uint32-align=4", "--little-endian"],
        "aarch64_be": ["--uint32-align=4", "--big-endian"],
        "sh4": ["--uint32-align=4", "--big-endian"],
        "powerpc": ["--uint32-align=4", "--big-endian"],
        "powerpc64": ["--uint32-align=4", "--big-endian"],
        "powerpc64le": ["--uint32-align=4", "--little-endian"],
        "mips": ["--uint32-align=4", "--big-endian"],
        "mipsisa32r6": ["--uint32-align=4", "--big-endian"],
        "mips64": ["--uint32-align=4", "--big-endian"],
        "mipsisa64r6": ["--uint32-align=4", "--big-endian"],
        "mipsel": ["--uint32-align=4", "--little-endian"],
        "mipsisa32r6el": ["--uint32-align=4", "--little-endian"],
        "mips64el": ["--uint32-align=4", "--little-endian"],
        "mipsisa64r6el": ["--uint32-align=4", "--little-endian"],
        "riscv64": ["--uint32-align=4", "--little-endian"],
        "riscv32": ["--uint32-align=4", "--little-endian"],
        "i586": ["--uint32-align=4", "--little-endian"],
        "i686": ["--uint32-align=4", "--little-endian"],
        "x86_64": ["--uint32-align=4", "--little-endian"]
    }
    if target_arch in locale_arch_options:
        arch_options = locale_arch_options[target_arch]
    else:
        bb.error("locale_arch_options not found for target_arch=" + target_arch)
        bb.fatal("unknown arch:" + target_arch + " for locale_arch_options")

    # Need to set this so cross-localedef knows where the archive is
    env = dict(os.environ)
    env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive")

    for name in sorted(os.listdir(localedir)):
        path = os.path.join(localedir, name)
        if os.path.isdir(path):
            cmd = ["cross-localedef", "--verbose"]
            cmd += arch_options
            cmd += ["--add-to-archive", path]
            subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT)
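
# For an aarch64 target rootfs, for instance, the loop above ends up invoking
# something like (paths are illustrative):
#
#   cross-localedef --verbose --uint32-align=4 --little-endian \
#       --add-to-archive <rootfs>/usr/lib/locale/en_GB
#
# with LOCALEARCHIVE in the environment pointing at <localedir>/locale-archive.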

class Indexer(object, metaclass=ABCMeta):
    def __init__(self, d, deploy_dir):
        self.d = d
        self.deploy_dir = deploy_dir

    @abstractmethod
    def write_index(self):
        pass

class PkgsList(object, metaclass=ABCMeta):
    def __init__(self, d, rootfs_dir):
        self.d = d
        self.rootfs_dir = rootfs_dir

    @abstractmethod
    def list_pkgs(self):
        pass

class PackageManager(object, metaclass=ABCMeta):
    """
    This is an abstract class. Do not instantiate this directly.
    """
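    # Concrete package managers live in the per-backend modules
    # (oe.package_manager.rpm, oe.package_manager.ipk, oe.package_manager.deb);
    # this base class only carries the backend-independent plumbing: intercepts,
    # feed configuration and complementary package installation.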

    def __init__(self, d, target_rootfs):
        self.d = d
        self.target_rootfs = target_rootfs
        self.deploy_dir = None
        self.deploy_lock = None
        self._initialize_intercepts()

    def _initialize_intercepts(self):
        bb.note("Initializing intercept dir for %s" % self.target_rootfs)
        # As there might be more than one instance of PackageManager operating at the same time
        # we need to isolate the intercept_scripts directories from each other,
        # hence the ugly hash digest in dir name.
        self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" %
                                           (hashlib.sha256(self.target_rootfs.encode()).hexdigest()))

        postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split()
        if not postinst_intercepts:
            postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH")
            if not postinst_intercepts_path:
                postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts")
            postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path)

        bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts))
        bb.utils.remove(self.intercepts_dir, True)
        bb.utils.mkdirhier(self.intercepts_dir)
        for intercept in postinst_intercepts:
            bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept)))

    @abstractmethod
    def _handle_intercept_failure(self, failed_script):
        pass

    def _postpone_to_first_boot(self, postinst_intercept_hook):
        with open(postinst_intercept_hook) as intercept:
            registered_pkgs = None
            for line in intercept.read().split("\n"):
                m = re.match(r"^##PKGS:(.*)", line)
                if m is not None:
                    registered_pkgs = m.group(1).strip()
                    break

            if registered_pkgs is not None:
                bb.note("If an image is being built, the postinstalls for the following packages "
                        "will be postponed for first boot: %s" %
                        registered_pkgs)

                # call the backend dependent handler
                self._handle_intercept_failure(registered_pkgs)


    def run_intercepts(self, populate_sdk=None):
        intercepts_dir = self.intercepts_dir

        bb.note("Running intercept scripts:")
        os.environ['D'] = self.target_rootfs
        os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE')
        for script in os.listdir(intercepts_dir):
            script_full = os.path.join(intercepts_dir, script)

            if script == "postinst_intercept" or not os.access(script_full, os.X_OK):
                continue

            # we do not want to run any multilib variant of this
            if script.startswith("delay_to_first_boot"):
                self._postpone_to_first_boot(script_full)
                continue

            if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
                bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
                        % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                continue

            bb.note("> Executing %s intercept ..." % script)

            try:
                output = subprocess.check_output(script_full, stderr=subprocess.STDOUT)
                if output: bb.note(output.decode("utf-8"))
            except subprocess.CalledProcessError as e:
                bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
                if populate_sdk == 'host':
                    bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                elif populate_sdk == 'target':
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                else:
                    if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
                        bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
                                % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
                        self._postpone_to_first_boot(script_full)
                    else:
                        bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))

    @abstractmethod
    def update(self):
        """
        Update the package manager package database.
        """
        pass

    @abstractmethod
    def install(self, pkgs, attempt_only=False):
        """
        Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is
        True, installation failures are ignored.
        """
        pass

    @abstractmethod
    def remove(self, pkgs, with_dependencies=True):
        """
        Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies'
        is False, then any dependencies are left in place.
        """
        pass

    @abstractmethod
    def write_index(self):
        """
        This function creates the index files
        """
        pass

    @abstractmethod
    def remove_packaging_data(self):
        pass

    @abstractmethod
    def list_installed(self):
        pass

    @abstractmethod
    def extract(self, pkg):
        """
        Returns the path to a tmpdir where the contents of a package reside.
        Deleting the tmpdir is the responsibility of the caller.
        """
        pass

    @abstractmethod
    def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs):
        """
        Add remote package feeds into repository manager configuration. The parameters
        for the feeds are set by feed_uris, feed_base_paths and feed_archs.
        See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS
        for their description.
        """
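        # The arguments typically come straight from the PACKAGE_FEED_URIS,
        # PACKAGE_FEED_BASE_PATHS and PACKAGE_FEED_ARCHS variables, for example
        # (illustrative values):
        #   feed_uris       = "https://example.com/feeds"
        #   feed_base_paths = "rpm"
        #   feed_archs      = "core2-64 noarch"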
        pass

    def install_glob(self, globs, sdk=False):
        """
        Install all packages that match a glob.
        """
        # TODO don't have sdk here but have a property on the superclass
        # (and respect in install_complementary)
        if sdk:
            pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}")
        else:
            pkgdatadir = self.d.getVar("PKGDATA_DIR")

        try:
            bb.note("Installing globbed packages...")
            cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs]
            pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
            self.install(pkgs.split(), attempt_only=True)
        except subprocess.CalledProcessError as e:
            # Return code 1 means no packages matched
            if e.returncode != 1:
                bb.fatal("Could not compute globbed packages list. Command "
                         "'%s' returned %d:\n%s" %
                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

    def install_complementary(self, globs=None):
        """
        Install complementary packages based upon the list of currently installed
        packages, e.g. locales, *-dev, *-dbg, etc. This will only attempt to install
        these packages; if they are not available then no error will occur. Note: every
        backend needs to call this function explicitly after the normal package
        installation.
        """
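        # When no globs are passed in, they come from IMAGE_INSTALL_COMPLEMENTARY,
        # which is populated from IMAGE_FEATURES via COMPLEMENTARY_GLOB
        # (e.g. the "dev-pkgs" feature maps to "*-dev", "dbg-pkgs" to "*-dbg").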
        if globs is None:
            globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY')
            split_linguas = set()

            for translation in self.d.getVar('IMAGE_LINGUAS').split():
                split_linguas.add(translation)
                split_linguas.add(translation.split('-')[0])

            split_linguas = sorted(split_linguas)

            for lang in split_linguas:
                globs += " *-locale-%s" % lang
                for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split():
                    globs += (" " + complementary_linguas) % lang

        if globs is None:
            return

        # we need to write the list of installed packages to a file because
        # oe-pkgdata-util reads it from a file
        with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs:
            pkgs = self.list_installed()

            provided_pkgs = set()
            for pkg in pkgs.values():
                provided_pkgs |= set(pkg.get('provs', []))

            output = oe.utils.format_pkg_list(pkgs, "arch")
            installed_pkgs.write(output)
            installed_pkgs.flush()

            cmd = ["oe-pkgdata-util",
                   "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name,
                   globs]
            exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY')
            if exclude:
                cmd.extend(['--exclude=' + '|'.join(exclude.split())])
            try:
                bb.note('Running %s' % cmd)
                complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8")
                complementary_pkgs = set(complementary_pkgs.split())
                skip_pkgs = sorted(complementary_pkgs & provided_pkgs)
                install_pkgs = sorted(complementary_pkgs - provided_pkgs)
                bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % (
                    ' '.join(install_pkgs),
                    ' '.join(skip_pkgs)))
                self.install(install_pkgs, attempt_only=True)
            except subprocess.CalledProcessError as e:
                bb.fatal("Could not compute complementary packages list. Command "
                         "'%s' returned %d:\n%s" %
                         (' '.join(cmd), e.returncode, e.output.decode("utf-8")))

        if self.d.getVar('IMAGE_LOCALES_ARCHIVE') == '1':
            target_arch = self.d.getVar('TARGET_ARCH')
            localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale")
            if os.path.exists(localedir) and os.listdir(localedir):
                generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir)
                # And now delete the binary locales
                self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False)

    def deploy_dir_lock(self):
        if self.deploy_dir is None:
            raise RuntimeError("deploy_dir is not set!")

        lock_file_name = os.path.join(self.deploy_dir, "deploy.lock")

        self.deploy_lock = bb.utils.lockfile(lock_file_name)

    def deploy_dir_unlock(self):
        if self.deploy_lock is None:
            return

        bb.utils.unlockfile(self.deploy_lock)

        self.deploy_lock = None

    def construct_uris(self, uris, base_paths):
        """
        Construct URIs based on the pattern uri/base_path, where 'uri' and
        'base_path' are drawn from the corresponding list arguments, giving
        len(uris) x len(base_paths) elements in the returned list.
        """
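        # Worked example (illustrative values):
        #   construct_uris(["http://example.com/feed"], ["rpm/core2-64", "rpm/all"])
        #   -> ["http://example.com/feed/rpm/core2-64", "http://example.com/feed/rpm/all"]
        # If base_paths is empty, the URIs are returned as-is (minus any trailing '/').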
        def _append(arr1, arr2, sep='/'):
            res = []
            narr1 = [a.rstrip(sep) for a in arr1]
            narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2]
            for a1 in narr1:
                if arr2:
                    for a2 in narr2:
                        res.append("%s%s%s" % (a1, sep, a2))
                else:
                    res.append(a1)
            return res
        return _append(uris, base_paths)

def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies):
    """
    Go through our do_package_write_X dependencies and hardlink the packages we depend
    upon into the repo directory. This prevents us from seeing other packages that may
    have been built but that we don't depend upon, as well as packages for architectures
    we don't support.
    """
    import errno

    taskdepdata = d.getVar("BB_TASKDEPDATA", False)
    mytaskname = d.getVar("BB_RUNTASK")
    pn = d.getVar("PN")
    seendirs = set()
    multilibs = {}

    bb.utils.remove(subrepo_dir, recurse=True)
    bb.utils.mkdirhier(subrepo_dir)

    # Detect bitbake -b usage
    nodeps = d.getVar("BB_LIMITEDDEPS") or False
    if nodeps or not filterbydependencies:
        oe.path.symlink(deploydir, subrepo_dir, True)
        return

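    # Each BB_TASKDEPDATA entry is a list in which, as used below, index 0 is the
    # recipe name (PN), index 1 the task name, index 2 the recipe file and index 3
    # the task's dependencies; starting from our own task we walk those dependencies
    # to collect the do_package_write_* tasks this recipe relies on.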
    start = None
    for dep in taskdepdata:
        data = taskdepdata[dep]
        if data[1] == mytaskname and data[0] == pn:
            start = dep
            break
    if start is None:
        bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?")
    pkgdeps = set()
    start = [start]
    seen = set(start)
    # Support direct dependencies (do_rootfs -> do_package_write_X)
    # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X)
    while start:
        next = []
        for dep2 in start:
            for dep in taskdepdata[dep2][3]:
                if taskdepdata[dep][0] != pn:
                    if "do_" + taskname in dep:
                        pkgdeps.add(dep)
                elif dep not in seen:
                    next.append(dep)
                    seen.add(dep)
        start = next

    for dep in pkgdeps:
        c = taskdepdata[dep][0]
        manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs)
        if not manifest:
            bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2]))
        if not os.path.exists(manifest):
            continue
        with open(manifest, "r") as f:
            for l in f:
                l = l.strip()
                deploydir = os.path.normpath(deploydir)
                if bb.data.inherits_class('packagefeed-stability', d):
                    dest = l.replace(deploydir + "-prediff", "")
                else:
                    dest = l.replace(deploydir, "")
                dest = subrepo_dir + dest
                if l.endswith("/"):
                    if dest not in seendirs:
                        bb.utils.mkdirhier(dest)
                        seendirs.add(dest)
                    continue
                # Try to hardlink the file, copy if that fails
                destdir = os.path.dirname(dest)
                if destdir not in seendirs:
                    bb.utils.mkdirhier(destdir)
                    seendirs.add(destdir)
                try:
                    os.link(l, dest)
                except OSError as err:
                    if err.errno == errno.EXDEV:
                        bb.utils.copyfile(l, dest)
                    else:
                        raise


def generate_index_files(d):
    from oe.package_manager.rpm import RpmSubdirIndexer
    from oe.package_manager.ipk import OpkgIndexer
    from oe.package_manager.deb import DpkgIndexer

    classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split()

    indexer_map = {
        "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')),
        "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')),
        "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB'))
    }

    result = None

    for pkg_class in classes:
        if pkg_class not in indexer_map:
            continue

        if os.path.exists(indexer_map[pkg_class][1]):
            result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index()

    if result is not None:
        bb.fatal(result)