meta-openembedded and poky: subtree updates
Squash of the following subtree updates, due to dependencies among them
and related OpenBMC changes:
meta-openembedded: subtree update:d0748372d2..9201611135
meta-openembedded: subtree update:9201611135..17fd382f34
poky: subtree update:9052e5b32a..2e11d97b6c
poky: subtree update:2e11d97b6c..a8544811d7
The change log was too large for the Jenkins plugin to handle, so it
has been removed. Here are the first and last commits of each subtree:
meta-openembedded:d0748372d2
cppzmq: bump to version 4.6.0
meta-openembedded:17fd382f34
mpv: Remove X11 dependency
poky:9052e5b32a
package_ipk: Remove pointless comment to trigger rebuild
poky:a8544811d7
pbzip2: Fix license warning
Change-Id: If0fc6c37629642ee207a4ca2f7aa501a2c673cd6
Signed-off-by: Andrew Geissler <geissonator@yahoo.com>
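
Note on the lcov backports below: the two patches teach geninfo to
consume gcov's intermediate text (GCC 5 to 8) and JSON (GCC 9) output.
Per the patch descriptions, behaviour is selected by the
geninfo_intermediate lcovrc option (0, 1 or auto; default auto). A
minimal usage sketch, assuming a host ~/.lcovrc and a build tree that
already contains .gcda files:

    # ~/.lcovrc: force plain-text gcov parsing; the default "auto"
    # picks the intermediate format whenever gcov advertises it
    geninfo_intermediate = 0

    # then collect coverage data as usual
    geninfo ./build -o coverage.info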
diff --git a/meta-openembedded/meta-oe/recipes-support/lcov/files/0001-geninfo-Add-intermediate-text-format-support.patch b/meta-openembedded/meta-oe/recipes-support/lcov/files/0001-geninfo-Add-intermediate-text-format-support.patch
new file mode 100644
index 0000000..9ac0770
--- /dev/null
+++ b/meta-openembedded/meta-oe/recipes-support/lcov/files/0001-geninfo-Add-intermediate-text-format-support.patch
@@ -0,0 +1,898 @@
+From ec3e1f411c332cbc2f2bc7ab7e2175ebf918b37a Mon Sep 17 00:00:00 2001
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date: Fri, 24 May 2019 16:56:52 +0200
+Subject: [PATCH 1/2] geninfo: Add intermediate text format support
+
+This change adds support for parsing the output of gcov's intermediate
+text file format as implemented by GCC versions 5 to 8. The use of the
+gcov intermediate format should increase processing speed. It also
+provides branch coverage data when using the --initial command line
+option.
+
+Users can control whether geninfo uses the intermediate format via the
+geninfo_intermediate configuration file option. Valid values are:
+
+ 0: Use normal text format
+ 1: Use intermediate format
+ auto: Use intermediate format if available. This is the default.
+
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+---
+ bin/geninfo | 567 ++++++++++++++++++++++++++++++++++++++++++++-------
+ lcovrc | 3 +
+ man/lcovrc.5 | 24 +++
+ 3 files changed, 521 insertions(+), 73 deletions(-)
+
+Upstream-Status: Backport
+Download URL: https://github.com/linux-test-project/lcov/commit/ebfeb3e179e450c69c3532f98cd5ea1fbf6ccba7
+
+diff --git a/bin/geninfo b/bin/geninfo
+index f41eaec..0276666 100755
+--- a/bin/geninfo
++++ b/bin/geninfo
+@@ -54,6 +54,8 @@ use warnings;
+ use File::Basename;
+ use File::Spec::Functions qw /abs2rel catdir file_name_is_absolute splitdir
+ splitpath catpath/;
++use File::Temp qw(tempfile tempdir);
++use File::Copy qw(copy);
+ use Getopt::Long;
+ use Digest::MD5 qw(md5_base64);
+ use Cwd qw/abs_path/;
+@@ -163,13 +165,13 @@ sub solve_relative_path($$);
+ sub read_gcov_header($);
+ sub read_gcov_file($);
+ sub info(@);
++sub process_intermediate($$$);
+ sub map_llvm_version($);
+ sub version_to_str($);
+ sub get_gcov_version();
+ sub system_no_output($@);
+ sub read_config($);
+ sub apply_config($);
+-sub get_exclusion_data($);
+ sub apply_exclusion_data($$);
+ sub process_graphfile($$);
+ sub filter_fn_name($);
+@@ -264,6 +266,8 @@ our $gcno_split_crc;
+ our $func_coverage = 1;
+ our $br_coverage = 0;
+ our $rc_auto_base = 1;
++our $rc_intermediate = "auto";
++our $intermediate;
+ our $excl_line = "LCOV_EXCL_LINE";
+ our $excl_br_line = "LCOV_EXCL_BR_LINE";
+
+@@ -331,6 +335,7 @@ if ($config || %opt_rc)
+ "geninfo_compat" => \$opt_compat,
+ "geninfo_adjust_src_path" => \$rc_adjust_src_path,
+ "geninfo_auto_base" => \$rc_auto_base,
++ "geninfo_intermediate" => \$rc_intermediate,
+ "lcov_function_coverage" => \$func_coverage,
+ "lcov_branch_coverage" => \$br_coverage,
+ "lcov_excl_line" => \$excl_line,
+@@ -460,15 +465,38 @@ if (system_no_output(3, $gcov_tool, "--help") == -1)
+ }
+
+ ($gcov_version, $gcov_version_string) = get_gcov_version();
++$gcov_caps = get_gcov_capabilities();
++
++# Determine intermediate mode
++if ($rc_intermediate eq "0") {
++ $intermediate = 0;
++} elsif ($rc_intermediate eq "1") {
++ $intermediate = 1;
++} elsif (lc($rc_intermediate) eq "auto") {
++ # Use intermediate format if supported by gcov
++ $intermediate = $gcov_caps->{'intermediate-format'} ? 1 : 0;
++} else {
++ die("ERROR: invalid value for geninfo_intermediate: ".
++ "'$rc_intermediate'\n");
++}
++
++if ($intermediate) {
++ info("Using intermediate gcov format\n");
++ if ($opt_derive_func_data) {
++ warn("WARNING: --derive-func-data is not compatible with ".
++ "intermediate format - ignoring\n");
++ $opt_derive_func_data = 0;
++ }
++}
+
+ # Determine gcov options
+-$gcov_caps = get_gcov_capabilities();
+ push(@gcov_options, "-b") if ($gcov_caps->{'branch-probabilities'} &&
+ ($br_coverage || $func_coverage));
+ push(@gcov_options, "-c") if ($gcov_caps->{'branch-counts'} &&
+ $br_coverage);
+ push(@gcov_options, "-a") if ($gcov_caps->{'all-blocks'} &&
+- $opt_gcov_all_blocks && $br_coverage);
++ $opt_gcov_all_blocks && $br_coverage &&
++ !$intermediate);
+ if ($gcov_caps->{'hash-filenames'})
+ {
+ push(@gcov_options, "-x");
+@@ -599,7 +627,7 @@ foreach my $entry (@data_directory) {
+ gen_info($entry);
+ }
+
+-if ($initial && $br_coverage) {
++if ($initial && $br_coverage && !$intermediate) {
+ warn("Note: --initial does not generate branch coverage ".
+ "data\n");
+ }
+@@ -768,6 +796,7 @@ sub gen_info($)
+ my $prefix;
+ my $type;
+ my $ext;
++ my $tempdir;
+
+ if ($initial) {
+ $type = "graph";
+@@ -798,16 +827,22 @@ sub gen_info($)
+ $prefix = "";
+ }
+
++ $tempdir = tempdir(CLEANUP => 1);
++
+ # Process all files in list
+ foreach $file (@file_list) {
+ # Process file
+- if ($initial) {
++ if ($intermediate) {
++ process_intermediate($file, $prefix, $tempdir);
++ } elsif ($initial) {
+ process_graphfile($file, $prefix);
+ } else {
+ process_dafile($file, $prefix);
+ }
+ }
+
++ unlink($tempdir);
++
+ # Report whether files were excluded.
+ if (%excluded_files) {
+ info("Excluded data for %d files due to include/exclude options\n",
+@@ -1058,10 +1093,12 @@ sub process_dafile($$)
+
+ # Try to find base directory automatically if requested by user
+ if ($rc_auto_base) {
+- $base_dir = find_base_from_graph($base_dir, $instr, $graph);
++ $base_dir = find_base_from_source($base_dir,
++ [ keys(%{$instr}), keys(%{$graph}) ]);
+ }
+
+- ($instr, $graph) = adjust_graph_filenames($base_dir, $instr, $graph);
++ adjust_source_filenames($instr, $base_dir);
++ adjust_source_filenames($graph, $base_dir);
+
+ # Set $object_dir to real location of object files. This may differ
+ # from $da_dir if the graph file is just a link to the "real" object
+@@ -2017,6 +2054,299 @@ sub read_gcov_file($)
+ }
+
+
++#
++# read_intermediate_text(gcov_filename, data)
++#
++# Read gcov intermediate text format in GCOV_FILENAME and add the resulting
++# data to DATA in the following format:
++#
++# data: source_filename -> file_data
++# file_data: concatenated lines of intermediate text data
++#
++
++sub read_intermediate_text($$)
++{
++ my ($gcov_filename, $data) = @_;
++ my $fd;
++ my $filename;
++
++ open($fd, "<", $gcov_filename) or
++ die("ERROR: Could not read $gcov_filename: $!\n");
++ while (my $line = <$fd>) {
++ if ($line =~ /^file:(.*)$/) {
++ $filename = $1;
++ chomp($filename);
++ } elsif (defined($filename)) {
++ $data->{$filename} .= $line;
++ }
++ }
++ close($fd);
++}
++
++
++#
++# intermediate_text_to_info(fd, data, srcdata)
++#
++# Write DATA in info format to file descriptor FD.
++#
++# data: filename -> file_data:
++# file_data: concatenated lines of intermediate text data
++#
++# srcdata: filename -> [ excl, brexcl, checksums ]
++# excl: lineno -> 1 for all lines for which to exclude all data
++# brexcl: lineno -> 1 for all lines for which to exclude branch data
++# checksums: lineno -> source code checksum
++#
++# Note: To simplify processing, gcov data is not combined here, that is counts
++# that appear multiple times for the same lines/branches are not added.
++# This is done by lcov/genhtml when reading the data files.
++#
++
++sub intermediate_text_to_info($$$)
++{
++ my ($fd, $data, $srcdata) = @_;
++ my $branch_num = 0;
++ my $c;
++
++ return if (!%{$data});
++
++ print($fd "TN:$test_name\n");
++ for my $filename (keys(%{$data})) {
++ my ($excl, $brexcl, $checksums);
++
++ if (defined($srcdata->{$filename})) {
++ ($excl, $brexcl, $checksums) = @{$srcdata->{$filename}};
++ }
++
++ print($fd "SF:$filename\n");
++ for my $line (split(/\n/, $data->{$filename})) {
++ if ($line =~ /^lcount:(\d+),(\d+),?/) {
++ # lcount:<line>,<count>
++ # lcount:<line>,<count>,<has_unexecuted_blocks>
++ if ($checksum && exists($checksums->{$1})) {
++ $c = ",".$checksums->{$1};
++ } else {
++ $c = "";
++ }
++ print($fd "DA:$1,$2$c\n") if (!$excl->{$1});
++
++ # Intermediate text format does not provide
++ # branch numbers, and the same branch may appear
++ # multiple times on the same line (e.g. in
++ # template instances). Synthesize a branch
++ # number based on the assumptions:
++ # a) the order of branches is fixed across
++ # instances
++ # b) an instance starts with an lcount line
++ $branch_num = 0;
++ } elsif ($line =~ /^function:(\d+),(\d+),([^,]+)$/) {
++ next if (!$func_coverage || $excl->{$1});
++
++ # function:<line>,<count>,<name>
++ print($fd "FN:$1,$3\n");
++ print($fd "FNDA:$2,$3\n");
++ } elsif ($line =~ /^function:(\d+),\d+,(\d+),([^,]+)$/) {
++ next if (!$func_coverage || $excl->{$1});
++
++ # function:<start_line>,<end_line>,<count>,
++ # <name>
++ print($fd "FN:$1,$3\n");
++ print($fd "FNDA:$2,$3\n");
++ } elsif ($line =~ /^branch:(\d+),(taken|nottaken|notexec)/) {
++ next if (!$br_coverage || $excl->{$1} ||
++ $brexcl->{$1});
++
++ # branch:<line>,taken|nottaken|notexec
++ if ($2 eq "taken") {
++ $c = 1;
++ } elsif ($2 eq "nottaken") {
++ $c = 0;
++ } else {
++ $c = "-";
++ }
++ print($fd "BRDA:$1,0,$branch_num,$c\n");
++ $branch_num++;
++ }
++ }
++ print($fd "end_of_record\n");
++ }
++}
++
++
++sub get_output_fd($$)
++{
++ my ($outfile, $file) = @_;
++ my $fd;
++
++ if (!defined($outfile)) {
++ open($fd, ">", "$file.info") or
++ die("ERROR: Cannot create file $file.info: $!\n");
++ } elsif ($outfile eq "-") {
++ open($fd, ">&STDOUT") or
++ die("ERROR: Cannot duplicate stdout: $!\n");
++ } else {
++ open($fd, ">>", $outfile) or
++ die("ERROR: Cannot write to file $outfile: $!\n");
++ }
++
++ return $fd;
++}
++
++
++#
++# print_gcov_warnings(stderr_file, is_graph, map)
++#
++# Print GCOV warnings in file STDERR_FILE to STDERR. If IS_GRAPH is non-zero,
++# suppress warnings about missing as these are expected. Replace keys found
++# in MAP with their values.
++#
++
++sub print_gcov_warnings($$$)
++{
++ my ($stderr_file, $is_graph, $map) = @_;
++ my $fd;
++
++ if (!open($fd, "<", $stderr_file)) {
++ warn("WARNING: Could not open GCOV stderr file ".
++ "$stderr_file: $!\n");
++ return;
++ }
++ while (my $line = <$fd>) {
++ next if ($is_graph && $line =~ /cannot open data file/);
++
++ for my $key (keys(%{$map})) {
++ $line =~ s/\Q$key\E/$map->{$key}/g;
++ }
++
++ print(STDERR $line);
++ }
++ close($fd);
++}
++
++
++#
++# process_intermediate(file, dir, tempdir)
++#
++# Create output for a single file (either a data file or a graph file) using
++# gcov's intermediate option.
++#
++
++sub process_intermediate($$$)
++{
++ my ($file, $dir, $tempdir) = @_;
++ my ($fdir, $fbase, $fext);
++ my $data_file;
++ my $errmsg;
++ my %data;
++ my $fd;
++ my $base;
++ my $srcdata;
++ my $is_graph = 0;
++ my ($out, $err, $rc);
++
++ info("Processing %s\n", abs2rel($file, $dir));
++
++ $file = solve_relative_path($cwd, $file);
++ ($fdir, $fbase, $fext) = split_filename($file);
++
++ $is_graph = 1 if (".$fext" eq $graph_file_extension);
++
++ if ($is_graph) {
++ # Process graph file - copy to temp directory to prevent
++ # accidental processing of associated data file
++ $data_file = "$tempdir/$fbase$graph_file_extension";
++ if (!copy($file, $data_file)) {
++ $errmsg = "ERROR: Could not copy file $file";
++ goto err;
++ }
++ } else {
++ # Process data file in place
++ $data_file = $file;
++ }
++
++ # Change directory
++ if (!chdir($tempdir)) {
++ $errmsg = "Could not change to directory $tempdir: $!";
++ goto err;
++ }
++
++ # Run gcov on data file
++ ($out, $err, $rc) = system_no_output(1 + 2 + 4, $gcov_tool,
++ $data_file, @gcov_options, "-i");
++ defined($out) && unlink($out);
++ if (defined($err)) {
++ print_gcov_warnings($err, $is_graph, {
++ $data_file => $file,
++ });
++ unlink($err);
++ }
++ if ($rc) {
++ $errmsg = "GCOV failed for $file";
++ goto err;
++ }
++
++ if ($is_graph) {
++ # Remove graph file copy
++ unlink($data_file);
++ }
++
++ # Parse resulting file(s)
++ for my $gcov_filename (glob("*.gcov")) {
++ read_intermediate_text($gcov_filename, \%data);
++ unlink($gcov_filename);
++ }
++
++ if (!%data) {
++ warn("WARNING: GCOV did not produce any data for $file\n");
++ return;
++ }
++
++ # Determine base directory
++ if (defined($base_directory)) {
++ $base = $base_directory;
++ } else {
++ $base = $fdir;
++
++ if (is_compat($COMPAT_MODE_LIBTOOL)) {
++ # Avoid files from .libs dirs
++ $base =~ s/\.libs$//;
++ }
++
++ # Try to find base directory automatically if requested by user
++ if ($rc_auto_base) {
++ $base = find_base_from_source($base, [ keys(%data) ]);
++ }
++ }
++
++ # Apply base file name to relative source files
++ adjust_source_filenames(\%data, $base);
++
++ # Remove excluded source files
++ filter_source_files(\%data);
++
++ # Get data on exclusion markers and checksums if requested
++ if (!$no_markers || $checksum) {
++ $srcdata = get_all_source_data(keys(%data));
++ }
++
++ # Generate output
++ $fd = get_output_fd($output_filename, $file);
++ intermediate_text_to_info($fd, \%data, $srcdata);
++ close($fd);
++
++ chdir($cwd);
++
++ return;
++
++err:
++ if ($ignore[$ERROR_GCOV]) {
++ warn("WARNING: $errmsg!\n");
++ } else {
++ die("ERROR: $errmsg!\n")
++ }
++}
++
++
+ # Map LLVM versions to the version of GCC gcov which they emulate.
+
+ sub map_llvm_version($)
+@@ -2151,8 +2481,12 @@ sub int_handler()
+ #
+ # MODE & 1: suppress STDOUT
+ # MODE & 2: suppress STDERR
++# MODE & 4: redirect to temporary files instead of suppressing
+ #
+-# Return 0 on success, non-zero otherwise.
++# Return (stdout, stderr, rc):
++# stdout: path to tempfile containing stdout or undef
++# stderr: path to tempfile containing stderr or undef
++# 0 on success, non-zero otherwise
+ #
+
+ sub system_no_output($@)
+@@ -2161,14 +2495,31 @@ sub system_no_output($@)
+ my $result;
+ local *OLD_STDERR;
+ local *OLD_STDOUT;
++ my $stdout_file;
++ my $stderr_file;
++ my $fd;
+
+ # Save old stdout and stderr handles
+ ($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT");
+ ($mode & 2) && open(OLD_STDERR, ">>&", "STDERR");
+
+- # Redirect to /dev/null
+- ($mode & 1) && open(STDOUT, ">", "/dev/null");
+- ($mode & 2) && open(STDERR, ">", "/dev/null");
++ if ($mode & 4) {
++ # Redirect to temporary files
++ if ($mode & 1) {
++ ($fd, $stdout_file) = tempfile(UNLINK => 1);
++ open(STDOUT, ">", $stdout_file) || warn("$!\n");
++ close($fd);
++ }
++ if ($mode & 2) {
++ ($fd, $stderr_file) = tempfile(UNLINK => 1);
++ open(STDERR, ">", $stderr_file) || warn("$!\n");
++ close($fd);
++ }
++ } else {
++ # Redirect to /dev/null
++ ($mode & 1) && open(STDOUT, ">", "/dev/null");
++ ($mode & 2) && open(STDERR, ">", "/dev/null");
++ }
+
+ debug("system(".join(' ', @_).")\n");
+ system(@_);
+@@ -2181,8 +2532,18 @@ sub system_no_output($@)
+ # Restore old handles
+ ($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT");
+ ($mode & 2) && open(STDERR, ">>&", "OLD_STDERR");
++
++ # Remove empty output files
++ if (defined($stdout_file) && -z $stdout_file) {
++ unlink($stdout_file);
++ $stdout_file = undef;
++ }
++ if (defined($stderr_file) && -z $stderr_file) {
++ unlink($stderr_file);
++ $stderr_file = undef;
++ }
+
+- return $result;
++ return ($stdout_file, $stderr_file, $result);
+ }
+
+
+@@ -2260,23 +2621,28 @@ sub apply_config($)
+
+
+ #
+-# get_exclusion_data(filename)
++# get_source_data(filename)
+ #
+-# Scan specified source code file for exclusion markers and return
+-# linenumber -> 1
+-# for all lines which should be excluded.
++# Scan specified source code file for exclusion markers and checksums. Return
++# ( excl, brexcl, checksums ) where
++# excl: lineno -> 1 for all lines for which to exclude all data
++# brexcl: lineno -> 1 for all lines for which to exclude branch data
++# checksums: lineno -> source code checksum
+ #
+
+-sub get_exclusion_data($)
++sub get_source_data($)
+ {
+ my ($filename) = @_;
+ my %list;
+ my $flag = 0;
++ my %brdata;
++ my $brflag = 0;
++ my %checksums;
+ local *HANDLE;
+
+ if (!open(HANDLE, "<", $filename)) {
+ warn("WARNING: could not open $filename\n");
+- return undef;
++ return;
+ }
+ while (<HANDLE>) {
+ if (/$EXCL_STOP/) {
+@@ -2287,14 +2653,62 @@ sub get_exclusion_data($)
+ if (/$excl_line/ || $flag) {
+ $list{$.} = 1;
+ }
++ if (/$EXCL_BR_STOP/) {
++ $brflag = 0;
++ } elsif (/$EXCL_BR_START/) {
++ $brflag = 1;
++ }
++ if (/$excl_br_line/ || $brflag) {
++ $brdata{$.} = 1;
++ }
++ if ($checksum) {
++ chomp();
++ $checksums{$.} = md5_base64($_);
++ }
+ }
+ close(HANDLE);
+
+- if ($flag) {
++ if ($flag || $brflag) {
+ warn("WARNING: unterminated exclusion section in $filename\n");
+ }
+
+- return \%list;
++ return (\%list, \%brdata, \%checksums);
++}
++
++
++#
++# get_all_source_data(filenames)
++#
++# Scan specified source code files for exclusion markers and return
++# filename -> [ excl, brexcl, checksums ]
++# excl: lineno -> 1 for all lines for which to exclude all data
++# brexcl: lineno -> 1 for all lines for which to exclude branch data
++# checksums: lineno -> source code checksum
++#
++
++sub get_all_source_data(@)
++{
++ my @filenames = @_;
++ my %data;
++ my $failed = 0;
++
++ for my $filename (@filenames) {
++ my @d;
++ next if (exists($data{$filename}));
++
++ @d = get_source_data($filename);
++ if (@d) {
++ $data{$filename} = [ @d ];
++ } else {
++ $failed = 1;
++ }
++ }
++
++ if ($failed) {
++ warn("WARNING: some exclusion markers may be ignored\n");
++ }
++
++ return \%data;
+ }
+
+
+@@ -2318,35 +2732,17 @@ sub apply_exclusion_data($$)
+ {
+ my ($instr, $graph) = @_;
+ my $filename;
+- my %excl_data;
+- my $excl_read_failed = 0;
++ my $excl_data;
+
+- # Collect exclusion marker data
+- foreach $filename (sort_uniq_lex(keys(%{$graph}), keys(%{$instr}))) {
+- my $excl = get_exclusion_data($filename);
+-
+- # Skip and note if file could not be read
+- if (!defined($excl)) {
+- $excl_read_failed = 1;
+- next;
+- }
+-
+- # Add to collection if there are markers
+- $excl_data{$filename} = $excl if (keys(%{$excl}) > 0);
+- }
+-
+- # Warn if not all source files could be read
+- if ($excl_read_failed) {
+- warn("WARNING: some exclusion markers may be ignored\n");
+- }
++ ($excl_data) = get_all_source_data(keys(%{$graph}), keys(%{$instr}));
+
+ # Skip if no markers were found
+- return ($instr, $graph) if (keys(%excl_data) == 0);
++ return ($instr, $graph) if (!%$excl_data);
+
+ # Apply exclusion marker data to graph
+- foreach $filename (keys(%excl_data)) {
++ foreach $filename (keys(%$excl_data)) {
+ my $function_data = $graph->{$filename};
+- my $excl = $excl_data{$filename};
++ my $excl = $excl_data->{$filename}->[0];
+ my $function;
+
+ next if (!defined($function_data));
+@@ -2384,9 +2780,9 @@ sub apply_exclusion_data($$)
+ }
+
+ # Apply exclusion marker data to instr
+- foreach $filename (keys(%excl_data)) {
++ foreach $filename (keys(%$excl_data)) {
+ my $line_data = $instr->{$filename};
+- my $excl = $excl_data{$filename};
++ my $excl = $excl_data->{$filename}->[0];
+ my $line;
+ my @new_data;
+
+@@ -2468,10 +2864,12 @@ sub process_graphfile($$)
+
+ # Try to find base directory automatically if requested by user
+ if ($rc_auto_base) {
+- $base_dir = find_base_from_graph($base_dir, $instr, $graph);
++ $base_dir = find_base_from_source($base_dir,
++ [ keys(%{$instr}), keys(%{$graph}) ]);
+ }
+
+- ($instr, $graph) = adjust_graph_filenames($base_dir, $instr, $graph);
++ adjust_source_filenames($instr, $base_dir);
++ adjust_source_filenames($graph, $base_dir);
+
+ if (!$no_markers) {
+ # Apply exclusion marker data to graph file data
+@@ -2767,11 +3165,11 @@ sub parent_dir($)
+ }
+
+ #
+-# find_base_from_graph(base_dir, instr, graph)
++# find_base_from_source(base_dir, source_files)
+ #
+-# Try to determine the base directory of the graph file specified by INSTR
+-# and GRAPH. The base directory is the base for all relative filenames in
+-# the graph file. It is defined by the current working directory at time
++# Try to determine the base directory of the object file built from
++# SOURCE_FILES. The base directory is the base for all relative filenames in
++# the gcov data. It is defined by the current working directory at time
+ # of compiling the source file.
+ #
+ # This function implements a heuristic which relies on the following
+@@ -2781,16 +3179,16 @@ sub parent_dir($)
+ # - files by the same name are not present in multiple parent directories
+ #
+
+-sub find_base_from_graph($$$)
++sub find_base_from_source($$)
+ {
+- my ($base_dir, $instr, $graph) = @_;
++ my ($base_dir, $source_files) = @_;
+ my $old_base;
+ my $best_miss;
+ my $best_base;
+ my %rel_files;
+
+ # Determine list of relative paths
+- foreach my $filename (keys(%{$instr}), keys(%{$graph})) {
++ foreach my $filename (@$source_files) {
+ next if (file_name_is_absolute($filename));
+
+ $rel_files{$filename} = 1;
+@@ -2829,17 +3227,17 @@ sub find_base_from_graph($$$)
+ }
+
+ #
+-# adjust_graph_filenames(base_dir, instr, graph)
++# adjust_source_filenames(hash, base_dir)
+ #
+-# Make relative paths in INSTR and GRAPH absolute and apply
+-# geninfo_adjust_src_path setting to graph file data.
++# Transform all keys of HASH to absolute form and apply requested
++# transformations.
+ #
+
+-sub adjust_graph_filenames($$$)
++sub adjust_source_filenames($$$)
+ {
+- my ($base_dir, $instr, $graph) = @_;
++ my ($hash, $base_dir) = @_;
+
+- foreach my $filename (keys(%{$instr})) {
++ foreach my $filename (keys(%{$hash})) {
+ my $old_filename = $filename;
+
+ # Convert to absolute canonical form
+@@ -2851,28 +3249,50 @@ sub adjust_graph_filenames($$$)
+ }
+
+ if ($filename ne $old_filename) {
+- $instr->{$filename} = delete($instr->{$old_filename});
++ $hash->{$filename} = delete($hash->{$old_filename});
+ }
+ }
++}
+
+- foreach my $filename (keys(%{$graph})) {
+- my $old_filename = $filename;
+
+- # Make absolute
+- # Convert to absolute canonical form
+- $filename = solve_relative_path($base_dir, $filename);
++#
++# filter_source_files(hash)
++#
++# Remove unwanted source file data from HASH.
++#
+
+- # Apply adjustment
+- if (defined($adjust_src_pattern)) {
+- $filename =~ s/$adjust_src_pattern/$adjust_src_replace/g;
++sub filter_source_files($)
++{
++ my ($hash) = @_;
++
++ foreach my $filename (keys(%{$hash})) {
++ # Skip external files if requested
++ goto del if (!$opt_external && is_external($filename));
++
++ # Apply include patterns
++ if (@include_patterns) {
++ my $keep;
++
++ foreach my $pattern (@include_patterns) {
++ if ($filename =~ (/^$pattern$/)) {
++ $keep = 1;
++ last;
++ }
++ }
++ goto del if (!$keep);
+ }
+
+- if ($filename ne $old_filename) {
+- $graph->{$filename} = delete($graph->{$old_filename});
++ # Apply exclude patterns
++ foreach my $pattern (@exclude_patterns) {
++ goto del if ($filename =~ (/^$pattern$/));
+ }
+- }
++ next;
+
+- return ($instr, $graph);
++del:
++ # Remove file data
++ delete($hash->{$filename});
++ $excluded_files{$filename} = 1;
++ }
+ }
+
+ #
+@@ -3784,6 +4204,7 @@ sub get_gcov_capabilities()
+ 'c' => 'branch-counts',
+ 'f' => 'function-summaries',
+ 'h' => 'help',
++ 'i' => 'intermediate-format',
+ 'l' => 'long-file-names',
+ 'n' => 'no-output',
+ 'o' => 'object-directory',
+diff --git a/lcovrc b/lcovrc
+index 40f364f..bd4bc3b 100644
+--- a/lcovrc
++++ b/lcovrc
+@@ -134,6 +134,9 @@ genhtml_desc_html=0
+ # when collecting coverage data.
+ geninfo_auto_base = 1
+
++# Use gcov intermediate format? Valid values are 0, 1, auto
++geninfo_intermediate = auto
++
+ # Directory containing gcov kernel files
+ # lcov_gcov_dir = /proc/gcov
+
+diff --git a/man/lcovrc.5 b/man/lcovrc.5
+index f20d273..bf0ce7a 100644
+--- a/man/lcovrc.5
++++ b/man/lcovrc.5
+@@ -223,6 +223,11 @@ geninfo_compat_libtool = 0
+ geninfo_auto_base = 1
+ .br
+
++# Use gcov intermediate format? Valid values are 0, 1, auto
++.br
++geninfo_intermediate = auto
++.br
++
+ # Directory containing gcov kernel files
+ .br
+ lcov_gcov_dir = /proc/gcov
+@@ -789,6 +794,25 @@ located, and in addition, is different between files of the same project.
+ Default is 1.
+ .PP
+
++.BR geninfo_intermediate " ="
++.IR 0 | 1 | auto
++.IP
++Specify whether to use gcov intermediate format
++.br
++
++Use this option to control whether geninfo should use the gcov intermediate
++format while collecting coverage data. The use of the gcov intermediate format
++should increase processing speed. It also provides branch coverage data when
++using the \-\-initial command line option.
++.br
++
++Valid values are 0 for off, 1 for on, and "auto" to let geninfo automatically
++use immediate format when supported by gcov.
++.br
++
++Default is "auto".
++.PP
++
+ .BR lcov_gcov_dir " ="
+ .I path_to_kernel_coverage_data
+ .IP
+--
+2.17.1
+
diff --git a/meta-openembedded/meta-oe/recipes-support/lcov/files/0002-geninfo-Add-intermediate-JSON-format-support.patch b/meta-openembedded/meta-oe/recipes-support/lcov/files/0002-geninfo-Add-intermediate-JSON-format-support.patch
new file mode 100644
index 0000000..7b18063
--- /dev/null
+++ b/meta-openembedded/meta-oe/recipes-support/lcov/files/0002-geninfo-Add-intermediate-JSON-format-support.patch
@@ -0,0 +1,247 @@
+From e13b2b6f8443da660cafa0679c3b16240843ce9f Mon Sep 17 00:00:00 2001
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+Date: Fri, 24 May 2019 17:16:56 +0200
+Subject: [PATCH 2/2] geninfo: Add intermediate JSON format support
+
+This change adds support for parsing the output of gcov's intermediate
+JSON file format as implemented by GCC version 9.
+
+Note: The way that the intermediate file format support is implemented
+in geninfo removes the need to parse .gcno files directly. Since geninfo
+does not include support for parsing GCC 9 .gcno files, using the
+intermediate format is the only option for geninfo to collect coverage
+data generated by GCC version 9.
+
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+---
+ bin/geninfo | 162 +++++++++++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 160 insertions(+), 2 deletions(-)
+
+Upstream-Status: Backport
+Download URL: https://github.com/linux-test-project/lcov/commit/75fbae1cfc5027f818a0bb865bf6f96fab3202da
+
+diff --git a/bin/geninfo b/bin/geninfo
+index 0276666..cceb782 100755
+--- a/bin/geninfo
++++ b/bin/geninfo
+@@ -59,6 +59,9 @@ use File::Copy qw(copy);
+ use Getopt::Long;
+ use Digest::MD5 qw(md5_base64);
+ use Cwd qw/abs_path/;
++use PerlIO::gzip;
++use JSON qw(decode_json);
++
+ if( $^O eq "msys" )
+ {
+ require File::Spec::Win32;
+@@ -474,7 +477,8 @@ if ($rc_intermediate eq "0") {
+ $intermediate = 1;
+ } elsif (lc($rc_intermediate) eq "auto") {
+ # Use intermediate format if supported by gcov
+- $intermediate = $gcov_caps->{'intermediate-format'} ? 1 : 0;
++ $intermediate = ($gcov_caps->{'intermediate-format'} ||
++ $gcov_caps->{'json-format'}) ? 1 : 0;
+ } else {
+ die("ERROR: invalid value for geninfo_intermediate: ".
+ "'$rc_intermediate'\n");
+@@ -2084,6 +2088,48 @@ sub read_intermediate_text($$)
+ }
+
+
++#
++# read_intermediate_json(gcov_filename, data, basedir_ref)
++#
++# Read gcov intermediate JSON format in GCOV_FILENAME and add the resulting
++# data to DATA in the following format:
++#
++# data: source_filename -> file_data
++# file_data: GCOV JSON data for file
++#
++# Also store the value for current_working_directory to BASEDIR_REF.
++#
++
++sub read_intermediate_json($$$)
++{
++ my ($gcov_filename, $data, $basedir_ref) = @_;
++ my $fd;
++ my $text;
++ my $json;
++
++ open($fd, "<:gzip", $gcov_filename) or
++ die("ERROR: Could not read $gcov_filename: $!\n");
++ local $/;
++ $text = <$fd>;
++ close($fd);
++
++ $json = decode_json($text);
++ if (!defined($json) || !exists($json->{"files"}) ||
++ ref($json->{"files"} ne "ARRAY")) {
++ die("ERROR: Unrecognized JSON output format in ".
++ "$gcov_filename\n");
++ }
++
++ $$basedir_ref = $json->{"current_working_directory"};
++
++ for my $file (@{$json->{"files"}}) {
++ my $filename = $file->{"file"};
++
++ $data->{$filename} = $file;
++ }
++}
++
++
+ #
+ # intermediate_text_to_info(fd, data, srcdata)
+ #
+@@ -2173,6 +2219,104 @@ sub intermediate_text_to_info($$$)
+ }
+
+
++#
++# intermediate_json_to_info(fd, data, srcdata)
++#
++# Write DATA in info format to file descriptor FD.
++#
++# data: filename -> file_data:
++# file_data: GCOV JSON data for file
++#
++# srcdata: filename -> [ excl, brexcl, checksums ]
++# excl: lineno -> 1 for all lines for which to exclude all data
++# brexcl: lineno -> 1 for all lines for which to exclude branch data
++# checksums: lineno -> source code checksum
++#
++# Note: To simplify processing, gcov data is not combined here, that is counts
++# that appear multiple times for the same lines/branches are not added.
++# This is done by lcov/genhtml when reading the data files.
++#
++
++sub intermediate_json_to_info($$$)
++{
++ my ($fd, $data, $srcdata) = @_;
++ my $branch_num = 0;
++
++ return if (!%{$data});
++
++ print($fd "TN:$test_name\n");
++ for my $filename (keys(%{$data})) {
++ my ($excl, $brexcl, $checksums);
++ my $file_data = $data->{$filename};
++
++ if (defined($srcdata->{$filename})) {
++ ($excl, $brexcl, $checksums) = @{$srcdata->{$filename}};
++ }
++
++ print($fd "SF:$filename\n");
++
++ # Function data
++ if ($func_coverage) {
++ for my $d (@{$file_data->{"functions"}}) {
++ my $line = $d->{"start_line"};
++ my $count = $d->{"execution_count"};
++ my $name = $d->{"name"};
++
++ next if (!defined($line) || !defined($count) ||
++ !defined($name) || $excl->{$line});
++
++ print($fd "FN:$line,$name\n");
++ print($fd "FNDA:$count,$name\n");
++ }
++ }
++
++ # Line data
++ for my $d (@{$file_data->{"lines"}}) {
++ my $line = $d->{"line_number"};
++ my $count = $d->{"count"};
++ my $c;
++ my $branches = $d->{"branches"};
++ my $unexec = $d->{"unexecuted_block"};
++
++ next if (!defined($line) || !defined($count) ||
++ $excl->{$line});
++
++ if (defined($unexec) && $unexec && $count == 0) {
++ $unexec = 1;
++ } else {
++ $unexec = 0;
++ }
++
++ if ($checksum && exists($checksums->{$line})) {
++ $c = ",".$checksums->{$line};
++ } else {
++ $c = "";
++ }
++ print($fd "DA:$line,$count$c\n");
++
++ $branch_num = 0;
++ # Branch data
++ if ($br_coverage && !$brexcl->{$line}) {
++ for my $b (@$branches) {
++ my $brcount = $b->{"count"};
++
++ if (!defined($brcount) || $unexec) {
++ $brcount = "-";
++ }
++ print($fd "BRDA:$line,0,$branch_num,".
++ "$brcount\n");
++
++ $branch_num++;
++ }
++ }
++
++ }
++
++ print($fd "end_of_record\n");
++ }
++}
++
++
+ sub get_output_fd($$)
+ {
+ my ($outfile, $file) = @_;
+@@ -2243,6 +2387,8 @@ sub process_intermediate($$$)
+ my $srcdata;
+ my $is_graph = 0;
+ my ($out, $err, $rc);
++ my $json_basedir;
++ my $json_format;
+
+ info("Processing %s\n", abs2rel($file, $dir));
+
+@@ -2296,6 +2442,12 @@ sub process_intermediate($$$)
+ unlink($gcov_filename);
+ }
+
++ for my $gcov_filename (glob("*.gcov.json.gz")) {
++ read_intermediate_json($gcov_filename, \%data, \$json_basedir);
++ unlink($gcov_filename);
++ $json_format = 1;
++ }
++
+ if (!%data) {
+ warn("WARNING: GCOV did not produce any data for $file\n");
+ return;
+@@ -2304,6 +2456,8 @@ sub process_intermediate($$$)
+ # Determine base directory
+ if (defined($base_directory)) {
+ $base = $base_directory;
++ } elsif (defined($json_basedir)) {
++ $base = $json_basedir;
+ } else {
+ $base = $fdir;
+
+@@ -2331,7 +2485,11 @@ sub process_intermediate($$$)
+
+ # Generate output
+ $fd = get_output_fd($output_filename, $file);
+- intermediate_text_to_info($fd, \%data, $srcdata);
++ if ($json_format) {
++ intermediate_json_to_info($fd, \%data, $srcdata);
++ } else {
++ intermediate_text_to_info($fd, \%data, $srcdata);
++ }
+ close($fd);
+
+ chdir($cwd);
+--
+2.17.1
+
diff --git a/meta-openembedded/meta-oe/recipes-support/lcov/lcov_1.14.bb b/meta-openembedded/meta-oe/recipes-support/lcov/lcov_1.14.bb
index 26e797a..1471818 100755
--- a/meta-openembedded/meta-oe/recipes-support/lcov/lcov_1.14.bb
+++ b/meta-openembedded/meta-oe/recipes-support/lcov/lcov_1.14.bb
@@ -10,6 +10,9 @@
RDEPENDS_${PN} += " \
gcov \
+ gcov-symlinks \
+ libjson-perl \
+ libperlio-gzip-perl \
perl \
perl-module-filehandle \
perl-module-getopt-std \
@@ -19,6 +22,7 @@
perl-module-cwd \
perl-module-errno \
perl-module-file-basename \
+ perl-module-file-copy \
perl-module-file-find \
perl-module-file-path \
perl-module-file-spec \
@@ -42,7 +46,11 @@
perl-module-tie-hash \
"
-SRC_URI = "http://downloads.sourceforge.net/ltp/${BP}.tar.gz"
+SRC_URI = " \
+ http://downloads.sourceforge.net/ltp/${BP}.tar.gz \
+ file://0001-geninfo-Add-intermediate-text-format-support.patch \
+ file://0002-geninfo-Add-intermediate-JSON-format-support.patch \
+ "
SRC_URI[md5sum] = "0220d01753469f83921f8f41ae5054c1"
SRC_URI[sha256sum] = "14995699187440e0ae4da57fe3a64adc0a3c5cf14feab971f8db38fb7d8f071a"
@@ -51,3 +59,4 @@
oe_runmake install PREFIX=${D}${prefix} CFG_DIR=${D}${sysconfdir}
}
+BBCLASSEXTEND = "native nativesdk"
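
Usage sketch (standard BitBake behaviour, assumed rather than shown in
this change): with BBCLASSEXTEND set to "native nativesdk" above, the
host-side tool can be built directly, e.g.:

    bitbake lcov-native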