reset upstream subtrees to HEAD

Reset the following subtrees on HEAD:

  poky: 8217b477a1(master)
  meta-xilinx: 64aa3d35ae(master)
  meta-openembedded: 0435c9e193(master)
  meta-raspberrypi: 490a4441ac(master)
  meta-security: cb6d1c85ee(master)

Squashed patches:
  meta-phosphor: drop systemd 239 patches
  meta-phosphor: mrw-api: use correct install path
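
Of note in the poky/meta/lib/oe/package_manager.py hunks below: the RpmPM
constructor gains a needfeed keyword so callers that only query the installed
package database can skip building the oe-rootfs-repo feed, and the opkg
backend drops handle_bad_recommendations() in favour of passing
--add-ignore-recommends for each entry in BAD_RECOMMENDATIONS at install
time. A minimal usage sketch of the RpmPM change (assuming a BitBake
datastore 'd' and a target rootfs path 'rootfs_dir', both placeholders):

  from oe.package_manager import RpmPM

  # Listing installed packages does not need the deployed RPM feed; with
  # needfeed=False the constructor skips create_packages_dir() and
  # _invoke_dnf() omits --repofrompath because rpm_repo_dir is never set.
  pm = RpmPM(d, rootfs_dir, d.getVar('TARGET_VENDOR'), needfeed=False)
  installed = pm.list_installed()

Install paths keep the default needfeed=True, so their behaviour is
unchanged.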
Change-Id: I268e2646d9174ad305630c6bbd3fbc1a6105f43d
Signed-off-by: Brad Bishop <bradleyb@fuzziesquirrel.com>
diff --git a/poky/meta/lib/oe/package_manager.py b/poky/meta/lib/oe/package_manager.py
index 882e7c4..2835c1d 100644
--- a/poky/meta/lib/oe/package_manager.py
+++ b/poky/meta/lib/oe/package_manager.py
@@ -29,7 +29,7 @@
a dictionary with the information of the packages. This is used
when the packages are in deb or ipk format.
"""
- verregex = re.compile(' \([=<>]* [^ )]*\)')
+ verregex = re.compile(r' \([=<>]* [^ )]*\)')
output = dict()
pkg = ""
arch = ""
@@ -94,6 +94,8 @@
# Pretty sure we don't need this for locale archive generation but
# keeping it to be safe...
locale_arch_options = { \
+ "arc": ["--uint32-align=4", "--little-endian"],
+ "arceb": ["--uint32-align=4", "--big-endian"],
"arm": ["--uint32-align=4", "--little-endian"],
"armeb": ["--uint32-align=4", "--big-endian"],
"aarch64": ["--uint32-align=4", "--little-endian"],
@@ -250,8 +252,8 @@
with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"),
"apt", "apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
- line = re.sub("#ROOTFS#", "/dev/null", line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ line = re.sub(r"#ROOTFS#", "/dev/null", line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
def write_index(self):
@@ -318,7 +320,7 @@
class RpmPkgsList(PkgsList):
def list_pkgs(self):
- return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR')).list_installed()
+ return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed()
class OpkgPkgsList(PkgsList):
def __init__(self, d, rootfs_dir, config_file):
@@ -406,7 +408,7 @@
with open(postinst_intercept_hook) as intercept:
registered_pkgs = None
for line in intercept.read().split("\n"):
- m = re.match("^##PKGS:(.*)", line)
+ m = re.match(r"^##PKGS:(.*)", line)
if m is not None:
registered_pkgs = m.group(1).strip()
break
@@ -437,6 +439,11 @@
self._postpone_to_first_boot(script_full)
continue
+ if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32':
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s"
+ % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ continue
+
bb.note("> Executing %s intercept ..." % script)
try:
@@ -445,10 +452,10 @@
except subprocess.CalledProcessError as e:
bb.note("Exit code %d. Output:\n%s" % (e.returncode, e.output.decode("utf-8")))
if populate_sdk == 'host':
- bb.warn("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
+ bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
elif populate_sdk == 'target':
if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"):
- bb.warn("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
+ bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s"
% (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
else:
bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK')))
@@ -722,7 +729,8 @@
arch_var=None,
os_var=None,
rpm_repo_workdir="oe-rootfs-repo",
- filterbydependencies=True):
+ filterbydependencies=True,
+ needfeed=True):
super(RpmPM, self).__init__(d, target_rootfs)
self.target_vendor = target_vendor
self.task_name = task_name
@@ -735,8 +743,9 @@
else:
self.primary_arch = self.d.getVar('MACHINE_ARCH')
- self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
- create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
+ if needfeed:
+ self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir)
+ create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies)
self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name)
if not os.path.exists(self.d.expand('${T}/saved_packaging_data')):
@@ -860,7 +869,7 @@
failed_scriptlets_pkgnames = collections.OrderedDict()
for line in output.splitlines():
- if line.startswith("Non-fatal POSTIN scriptlet failure in rpm package"):
+ if line.startswith("Error in POSTIN scriptlet in rpm package"):
failed_scriptlets_pkgnames[line.split()[-1]] = True
if len(failed_scriptlets_pkgnames) > 0:
@@ -962,13 +971,14 @@
os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs
dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf")
- standard_dnf_args = ["-v", "--rpmverbosity=debug", "-y",
+ standard_dnf_args = ["-v", "--rpmverbosity=info", "-y",
"-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"),
"--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")),
- "--repofrompath=oe-repo,%s" % (self.rpm_repo_dir),
"--installroot=%s" % (self.target_rootfs),
"--setopt=logdir=%s" % (self.d.getVar('T'))
]
+ if hasattr(self, "rpm_repo_dir"):
+ standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir))
cmd = [dnf_cmd] + standard_dnf_args + dnf_args
bb.note('Running %s' % ' '.join(cmd))
try:
@@ -1108,10 +1118,7 @@
tmp_dir = tempfile.mkdtemp()
current_dir = os.getcwd()
os.chdir(tmp_dir)
- if self.d.getVar('IMAGE_PKGTYPE') == 'deb':
- data_tar = 'data.tar.xz'
- else:
- data_tar = 'data.tar.gz'
+ data_tar = 'data.tar.xz'
try:
cmd = [ar_cmd, 'x', pkg_path]
@@ -1212,7 +1219,7 @@
priority += 5
for line in (self.d.getVar('IPK_FEED_URIS') or "").split():
- feed_match = re.match("^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
+ feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line)
if feed_match is not None:
feed_name = feed_match.group(1)
@@ -1329,6 +1336,8 @@
cmd = "%s %s" % (self.opkg_cmd, self.opkg_args)
for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split():
cmd += " --add-exclude %s" % exclude
+ for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split():
+ cmd += " --add-ignore-recommends %s" % bad_recommendation
cmd += " install "
cmd += " ".join(pkgs)
@@ -1397,45 +1406,6 @@
def list_installed(self):
return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs()
- def handle_bad_recommendations(self):
- bad_recommendations = self.d.getVar("BAD_RECOMMENDATIONS") or ""
- if bad_recommendations.strip() == "":
- return
-
- status_file = os.path.join(self.opkg_dir, "status")
-
- # If status file existed, it means the bad recommendations has already
- # been handled
- if os.path.exists(status_file):
- return
-
- cmd = "%s %s info " % (self.opkg_cmd, self.opkg_args)
-
- with open(status_file, "w+") as status:
- for pkg in bad_recommendations.split():
- pkg_info = cmd + pkg
-
- try:
- output = subprocess.check_output(pkg_info.split(), stderr=subprocess.STDOUT).strip().decode("utf-8")
- except subprocess.CalledProcessError as e:
- bb.fatal("Cannot get package info. Command '%s' "
- "returned %d:\n%s" % (pkg_info, e.returncode, e.output.decode("utf-8")))
-
- if output == "":
- bb.note("Ignored bad recommendation: '%s' is "
- "not a package" % pkg)
- continue
-
- for line in output.split('\n'):
- if line.startswith("Status:"):
- status.write("Status: deinstall hold not-installed\n")
- else:
- status.write(line + "\n")
-
- # Append a blank line after each package entry to ensure that it
- # is separated from the following entry
- status.write("\n")
-
def dummy_install(self, pkgs):
"""
The following function dummy installs pkgs and returns the log of output.
@@ -1520,7 +1490,7 @@
"trying to extract the package." % pkg)
tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info)
- bb.utils.remove(os.path.join(tmp_dir, "data.tar.gz"))
+ bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz"))
return tmp_dir
@@ -1592,7 +1562,7 @@
with open(status_file, "r") as status:
for line in status.read().split('\n'):
- m = re.match("^Package: (.*)", line)
+ m = re.match(r"^Package: (.*)", line)
if m is not None:
installed_pkgs.append(m.group(1))
@@ -1657,13 +1627,13 @@
# rename *.dpkg-new files/dirs
for root, dirs, files in os.walk(self.target_rootfs):
for dir in dirs:
- new_dir = re.sub("\.dpkg-new", "", dir)
+ new_dir = re.sub(r"\.dpkg-new", "", dir)
if dir != new_dir:
os.rename(os.path.join(root, dir),
os.path.join(root, new_dir))
for file in files:
- new_file = re.sub("\.dpkg-new", "", file)
+ new_file = re.sub(r"\.dpkg-new", "", file)
if file != new_file:
os.rename(os.path.join(root, file),
os.path.join(root, new_file))
@@ -1728,7 +1698,7 @@
sources_file.write("deb %s ./\n" % uri)
def _create_configs(self, archs, base_archs):
- base_archs = re.sub("_", "-", base_archs)
+ base_archs = re.sub(r"_", r"-", base_archs)
if os.path.exists(self.apt_conf_dir):
bb.utils.remove(self.apt_conf_dir, True)
@@ -1782,7 +1752,7 @@
with open(self.apt_conf_file, "w+") as apt_conf:
with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample:
for line in apt_conf_sample.read().split("\n"):
- match_arch = re.match(" Architecture \".*\";$", line)
+ match_arch = re.match(r" Architecture \".*\";$", line)
architectures = ""
if match_arch:
for base_arch in base_arch_list:
@@ -1790,8 +1760,8 @@
apt_conf.write(" Architectures {%s};\n" % architectures);
apt_conf.write(" Architecture \"%s\";\n" % base_archs)
else:
- line = re.sub("#ROOTFS#", self.target_rootfs, line)
- line = re.sub("#APTCONF#", self.apt_conf_dir, line)
+ line = re.sub(r"#ROOTFS#", self.target_rootfs, line)
+ line = re.sub(r"#APTCONF#", self.apt_conf_dir, line)
apt_conf.write(line + "\n")
target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs