IAP GITLAB

Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • AirShowerPhysics/corsika
  • rulrich/corsika
  • AAAlvesJr/corsika
  • Andre/corsika
  • arrabito/corsika
  • Nikos/corsika
  • olheiser73/corsika
  • AirShowerPhysics/papers/corsika
  • pranav/corsika
9 results
Show changes
#!/usr/bin/env perl
#
# Copyright (c) International Business Machines Corp., 2002,2012
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# lcov
#
# This is a wrapper script which provides a single interface for accessing
# LCOV coverage data.
#
#
# History:
# 2002-08-29 created by Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
# IBM Lab Boeblingen
# 2002-09-05 / Peter Oberparleiter: implemented --kernel-directory +
# multiple directories
# 2002-10-16 / Peter Oberparleiter: implemented --add-tracefile option
# 2002-10-17 / Peter Oberparleiter: implemented --extract option
# 2002-11-04 / Peter Oberparleiter: implemented --list option
# 2003-03-07 / Paul Larson: Changed to make it work with the latest gcov
# kernel patch. This will break it with older gcov-kernel
# patches unless you change the value of $gcovmod in this script
# 2003-04-07 / Peter Oberparleiter: fixed bug which resulted in an error
# when trying to combine .info files containing data without
# a test name
# 2003-04-10 / Peter Oberparleiter: extended Paul's change so that LCOV
# works both with the new and the old gcov-kernel patch
# 2003-04-10 / Peter Oberparleiter: added $gcov_dir constant in anticipation
# of a possible move of the gcov kernel directory to another
# file system in a future version of the gcov-kernel patch
# 2003-04-15 / Paul Larson: make info write to STDERR, not STDOUT
# 2003-04-15 / Paul Larson: added --remove option
# 2003-04-30 / Peter Oberparleiter: renamed --reset to --zerocounters
# to remove naming ambiguity with --remove
# 2003-04-30 / Peter Oberparleiter: adjusted help text to include --remove
# 2003-06-27 / Peter Oberparleiter: implemented --diff
# 2003-07-03 / Peter Oberparleiter: added line checksum support, added
# --no-checksum
# 2003-12-11 / Laurent Deniel: added --follow option
# 2004-03-29 / Peter Oberparleiter: modified --diff option to better cope with
# ambiguous patch file entries, modified --capture option to use
# modprobe before insmod (needed for 2.6)
# 2004-03-30 / Peter Oberparleiter: added --path option
# 2004-08-09 / Peter Oberparleiter: added configuration file support
# 2008-08-13 / Peter Oberparleiter: added function coverage support
#
use strict;
use warnings;
use File::Basename;
use File::Path;
use File::Find;
use File::Temp qw /tempdir/;
use File::Spec::Functions qw /abs2rel canonpath catdir catfile catpath
file_name_is_absolute rootdir splitdir splitpath/;
use Getopt::Long;
use Cwd qw /abs_path getcwd/;
# Global constants
our $tool_dir = abs_path(dirname($0));
our $lcov_version = "LCOV version 1.14";
our $lcov_url = "http://ltp.sourceforge.net/coverage/lcov.php";
our $tool_name = basename($0);
# Directory containing gcov kernel files
our $gcov_dir;
# Where to create temporary directories
our $tmp_dir;
# Internal constants
our $GKV_PROC = 0; # gcov-kernel data in /proc via external patch
our $GKV_SYS = 1; # gcov-kernel data in /sys via vanilla 2.6.31+
our @GKV_NAME = ( "external", "upstream" );
our $pkg_gkv_file = ".gcov_kernel_version";
our $pkg_build_file = ".build_directory";
# Branch data combination types
our $BR_SUB = 0;
our $BR_ADD = 1;
# Prototypes
sub print_usage(*);
sub check_options();
sub userspace_reset();
sub userspace_capture();
sub kernel_reset();
sub kernel_capture();
sub kernel_capture_initial();
sub package_capture();
sub add_traces();
sub read_info_file($);
sub get_info_entry($);
sub set_info_entry($$$$$$$$$;$$$$$$);
sub add_counts($$);
sub merge_checksums($$$);
sub combine_info_entries($$$);
sub combine_info_files($$);
sub write_info_file(*$);
sub extract();
sub remove();
sub list();
sub get_common_filename($$);
sub read_diff($);
sub diff();
sub system_no_output($@);
sub read_config($);
sub apply_config($);
sub info(@);
sub create_temp_dir();
sub transform_pattern($);
sub warn_handler($);
sub die_handler($);
sub abort_handler($);
sub temp_cleanup();
sub setup_gkv();
sub get_overall_line($$$$);
sub print_overall_rate($$$$$$$$$);
sub lcov_geninfo(@);
sub create_package($$$;$);
sub get_func_found_and_hit($);
sub summary();
sub rate($$;$$$);
# Global variables & initialization
our @directory; # Specifies where to get coverage data from
our @kernel_directory; # If set, captures only from specified kernel subdirs
our @add_tracefile; # If set, reads in and combines all files in list
our $list; # If set, list contents of tracefile
our $extract; # If set, extracts parts of tracefile
our $remove; # If set, removes parts of tracefile
our $diff; # If set, modifies tracefile according to diff
our $reset; # If set, reset all coverage data to zero
our $capture; # If set, capture data
our $output_filename; # Name for file to write coverage data to
our $test_name = ""; # Test case name
our $quiet = ""; # If set, suppress information messages
our $help; # Help option flag
our $version; # Version option flag
our $convert_filenames; # If set, convert filenames when applying diff
our $strip; # If set, strip leading directories when applying diff
our $temp_dir_name; # Name of temporary directory
our $cwd = `pwd`; # Current working directory
our $data_stdout; # If set, indicates that data is written to stdout
our $follow; # If set, indicates that find shall follow links
our $diff_path = ""; # Path removed from tracefile when applying diff
our $base_directory; # Base directory (cwd of gcc during compilation)
our $checksum; # If set, calculate a checksum for each line
our $no_checksum; # If set, don't calculate a checksum for each line
our $compat_libtool; # If set, indicates that libtool mode is to be enabled
our $no_compat_libtool; # If set, indicates that libtool mode is to be disabled
our $gcov_tool;
our @opt_ignore_errors;
our $initial;
our @include_patterns; # List of source file patterns to include
our @exclude_patterns; # List of source file patterns to exclude
our $no_recursion = 0;
our $to_package;
our $from_package;
our $maxdepth;
our $no_markers;
our $config; # Configuration file contents
chomp($cwd);
our @temp_dirs;
our $gcov_gkv; # gcov kernel support version found on machine
our $opt_derive_func_data;
our $opt_debug;
our $opt_list_full_path;
our $opt_no_list_full_path;
our $opt_list_width = 80;
our $opt_list_truncate_max = 20;
our $opt_external;
our $opt_no_external;
our $opt_config_file;
our %opt_rc;
our @opt_summary;
our $opt_compat;
our $ln_overall_found;
our $ln_overall_hit;
our $fn_overall_found;
our $fn_overall_hit;
our $br_overall_found;
our $br_overall_hit;
our $func_coverage = 1;
our $br_coverage = 0;
#
# Code entry point
#
$SIG{__WARN__} = \&warn_handler;
$SIG{__DIE__} = \&die_handler;
$SIG{'INT'} = \&abort_handler;
$SIG{'QUIT'} = \&abort_handler;
# Check command line for a configuration file name
Getopt::Long::Configure("pass_through", "no_auto_abbrev");
GetOptions("config-file=s" => \$opt_config_file,
"rc=s%" => \%opt_rc);
Getopt::Long::Configure("default");
{
# Remove spaces around rc options
my %new_opt_rc;
while (my ($key, $value) = each(%opt_rc)) {
$key =~ s/^\s+|\s+$//g;
$value =~ s/^\s+|\s+$//g;
$new_opt_rc{$key} = $value;
}
%opt_rc = %new_opt_rc;
}
# Read configuration file if available
if (defined($opt_config_file)) {
$config = read_config($opt_config_file);
} elsif (defined($ENV{"HOME"}) && (-r $ENV{"HOME"}."/.lcovrc"))
{
$config = read_config($ENV{"HOME"}."/.lcovrc");
}
elsif (-r "/etc/lcovrc")
{
$config = read_config("/etc/lcovrc");
} elsif (-r "/usr/local/etc/lcovrc")
{
$config = read_config("/usr/local/etc/lcovrc");
}
if ($config || %opt_rc)
{
# Copy configuration file and --rc values to variables
apply_config({
"lcov_gcov_dir" => \$gcov_dir,
"lcov_tmp_dir" => \$tmp_dir,
"lcov_list_full_path" => \$opt_list_full_path,
"lcov_list_width" => \$opt_list_width,
"lcov_list_truncate_max"=> \$opt_list_truncate_max,
"lcov_branch_coverage" => \$br_coverage,
"lcov_function_coverage"=> \$func_coverage,
});
}
# Parse command line options
if (!GetOptions("directory|d|di=s" => \@directory,
"add-tracefile|a=s" => \@add_tracefile,
"list|l=s" => \$list,
"kernel-directory|k=s" => \@kernel_directory,
"extract|e=s" => \$extract,
"remove|r=s" => \$remove,
"diff=s" => \$diff,
"convert-filenames" => \$convert_filenames,
"strip=i" => \$strip,
"capture|c" => \$capture,
"output-file|o=s" => \$output_filename,
"test-name|t=s" => \$test_name,
"zerocounters|z" => \$reset,
"quiet|q" => \$quiet,
"help|h|?" => \$help,
"version|v" => \$version,
"follow|f" => \$follow,
"path=s" => \$diff_path,
"base-directory|b=s" => \$base_directory,
"checksum" => \$checksum,
"no-checksum" => \$no_checksum,
"compat-libtool" => \$compat_libtool,
"no-compat-libtool" => \$no_compat_libtool,
"gcov-tool=s" => \$gcov_tool,
"ignore-errors=s" => \@opt_ignore_errors,
"initial|i" => \$initial,
"include=s" => \@include_patterns,
"exclude=s" => \@exclude_patterns,
"no-recursion" => \$no_recursion,
"to-package=s" => \$to_package,
"from-package=s" => \$from_package,
"no-markers" => \$no_markers,
"derive-func-data" => \$opt_derive_func_data,
"debug" => \$opt_debug,
"list-full-path" => \$opt_list_full_path,
"no-list-full-path" => \$opt_no_list_full_path,
"external" => \$opt_external,
"no-external" => \$opt_no_external,
"summary=s" => \@opt_summary,
"compat=s" => \$opt_compat,
"config-file=s" => \$opt_config_file,
"rc=s%" => \%opt_rc,
))
{
print(STDERR "Use $tool_name --help to get usage information\n");
exit(1);
}
else
{
# Merge options
if (defined($no_checksum))
{
$checksum = ($no_checksum ? 0 : 1);
$no_checksum = undef;
}
if (defined($no_compat_libtool))
{
$compat_libtool = ($no_compat_libtool ? 0 : 1);
$no_compat_libtool = undef;
}
if (defined($opt_no_list_full_path))
{
$opt_list_full_path = ($opt_no_list_full_path ? 0 : 1);
$opt_no_list_full_path = undef;
}
if (defined($opt_no_external)) {
$opt_external = 0;
$opt_no_external = undef;
}
}
# Check for help option
if ($help)
{
print_usage(*STDOUT);
exit(0);
}
# Check for version option
if ($version)
{
print("$tool_name: $lcov_version\n");
exit(0);
}
# Check list width option
if ($opt_list_width <= 40) {
die("ERROR: lcov_list_width parameter out of range (needs to be ".
"larger than 40)\n");
}
# Normalize --path text
$diff_path =~ s/\/$//;
if ($follow)
{
$follow = "-follow";
}
else
{
$follow = "";
}
if ($no_recursion)
{
$maxdepth = "-maxdepth 1";
}
else
{
$maxdepth = "";
}
# Check for valid options
check_options();
# Only --extract, --remove and --diff allow unnamed parameters
if (@ARGV && !($extract || $remove || $diff || @opt_summary))
{
die("Extra parameter found: '".join(" ", @ARGV)."'\n".
"Use $tool_name --help to get usage information\n");
}
# Check for output filename
$data_stdout = !($output_filename && ($output_filename ne "-"));
if ($capture)
{
if ($data_stdout)
{
# Option that tells geninfo to write to stdout
$output_filename = "-";
}
}
# Determine kernel directory for gcov data
if (!$from_package && !@directory && ($capture || $reset)) {
($gcov_gkv, $gcov_dir) = setup_gkv();
}
# Check for requested functionality
if ($reset)
{
$data_stdout = 0;
# Differentiate between user space and kernel reset
if (@directory)
{
userspace_reset();
}
else
{
kernel_reset();
}
}
elsif ($capture)
{
# Capture source can be user space, kernel or package
if ($from_package) {
package_capture();
} elsif (@directory) {
userspace_capture();
} else {
if ($initial) {
if (defined($to_package)) {
die("ERROR: --initial cannot be used together ".
"with --to-package\n");
}
kernel_capture_initial();
} else {
kernel_capture();
}
}
}
elsif (@add_tracefile)
{
($ln_overall_found, $ln_overall_hit,
$fn_overall_found, $fn_overall_hit,
$br_overall_found, $br_overall_hit) = add_traces();
}
elsif ($remove)
{
($ln_overall_found, $ln_overall_hit,
$fn_overall_found, $fn_overall_hit,
$br_overall_found, $br_overall_hit) = remove();
}
elsif ($extract)
{
($ln_overall_found, $ln_overall_hit,
$fn_overall_found, $fn_overall_hit,
$br_overall_found, $br_overall_hit) = extract();
}
elsif ($list)
{
$data_stdout = 0;
list();
}
elsif ($diff)
{
if (scalar(@ARGV) != 1)
{
die("ERROR: option --diff requires one additional argument!\n".
"Use $tool_name --help to get usage information\n");
}
($ln_overall_found, $ln_overall_hit,
$fn_overall_found, $fn_overall_hit,
$br_overall_found, $br_overall_hit) = diff();
}
elsif (@opt_summary)
{
$data_stdout = 0;
($ln_overall_found, $ln_overall_hit,
$fn_overall_found, $fn_overall_hit,
$br_overall_found, $br_overall_hit) = summary();
}
temp_cleanup();
if (defined($ln_overall_found)) {
print_overall_rate(1, $ln_overall_found, $ln_overall_hit,
1, $fn_overall_found, $fn_overall_hit,
1, $br_overall_found, $br_overall_hit);
} else {
info("Done.\n") if (!$list && !$capture);
}
exit(0);
#
# print_usage(handle)
#
# Print usage information.
#
sub print_usage(*)
{
local *HANDLE = $_[0];
print(HANDLE <<END_OF_USAGE);
Usage: $tool_name [OPTIONS]
Use lcov to collect coverage data from either the currently running Linux
kernel or from a user space application. Specify the --directory option to
get coverage data for a user space program.
Misc:
-h, --help Print this help, then exit
-v, --version Print version number, then exit
-q, --quiet Do not print progress messages
Operation:
-z, --zerocounters Reset all execution counts to zero
-c, --capture Capture coverage data
-a, --add-tracefile FILE Add contents of tracefiles
-e, --extract FILE PATTERN Extract files matching PATTERN from FILE
-r, --remove FILE PATTERN Remove files matching PATTERN from FILE
-l, --list FILE List contents of tracefile FILE
--diff FILE DIFF Transform tracefile FILE according to DIFF
--summary FILE Show summary coverage data for tracefiles
Options:
-i, --initial Capture initial zero coverage data
-t, --test-name NAME Specify test name to be stored with data
-o, --output-file FILENAME Write data to FILENAME instead of stdout
-d, --directory DIR Use .da files in DIR instead of kernel
-f, --follow Follow links when searching .da files
-k, --kernel-directory KDIR Capture kernel coverage data only from KDIR
-b, --base-directory DIR Use DIR as base directory for relative paths
--convert-filenames Convert filenames when applying diff
--strip DEPTH Strip initial DEPTH directory levels in diff
--path PATH Strip PATH from tracefile when applying diff
--(no-)checksum Enable (disable) line checksumming
--(no-)compat-libtool Enable (disable) libtool compatibility mode
--gcov-tool TOOL Specify gcov tool location
--ignore-errors ERRORS Continue after ERRORS (gcov, source, graph)
--no-recursion Exclude subdirectories from processing
--to-package FILENAME Store unprocessed coverage data in FILENAME
--from-package FILENAME Capture from unprocessed data in FILENAME
--no-markers Ignore exclusion markers in source code
--derive-func-data Generate function data from line data
--list-full-path Print full path during a list operation
--(no-)external Include (ignore) data for external files
--config-file FILENAME Specify configuration file location
--rc SETTING=VALUE Override configuration file setting
--compat MODE=on|off|auto Set compat MODE (libtool, hammer, split_crc)
--include PATTERN Include files matching PATTERN
--exclude PATTERN Exclude files matching PATTERN
For more information see: $lcov_url
END_OF_USAGE
;
}
#
# check_options()
#
# Check for valid combination of command line options. Die on error.
#
sub check_options()
{
my $i = 0;
# Count occurrence of mutually exclusive options
$reset && $i++;
$capture && $i++;
@add_tracefile && $i++;
$extract && $i++;
$remove && $i++;
$list && $i++;
$diff && $i++;
@opt_summary && $i++;
if ($i == 0)
{
die("Need one of options -z, -c, -a, -e, -r, -l, ".
"--diff or --summary\n".
"Use $tool_name --help to get usage information\n");
}
elsif ($i > 1)
{
die("ERROR: only one of -z, -c, -a, -e, -r, -l, ".
"--diff or --summary allowed!\n".
"Use $tool_name --help to get usage information\n");
}
}
#
# userspace_reset()
#
# Reset coverage data found in DIRECTORY by deleting all contained .da files.
#
# Die on error.
#
sub userspace_reset()
{
my $current_dir;
my @file_list;
foreach $current_dir (@directory)
{
info("Deleting all .da files in $current_dir".
($no_recursion?"\n":" and subdirectories\n"));
@file_list = `find "$current_dir" $maxdepth $follow -name \\*\\.da -type f -o -name \\*\\.gcda -type f 2>/dev/null`;
chomp(@file_list);
foreach (@file_list)
{
unlink($_) or die("ERROR: cannot remove file $_!\n");
}
}
}
#
# userspace_capture()
#
# Capture coverage data found in DIRECTORY and write it to a package (if
# TO_PACKAGE specified) or to OUTPUT_FILENAME or STDOUT.
#
# Die on error.
#
sub userspace_capture()
{
my $dir;
my $build;
if (!defined($to_package)) {
lcov_geninfo(@directory);
return;
}
if (scalar(@directory) != 1) {
die("ERROR: -d may be specified only once with --to-package\n");
}
$dir = $directory[0];
if (defined($base_directory)) {
$build = $base_directory;
} else {
$build = $dir;
}
create_package($to_package, $dir, $build);
}
#
# kernel_reset()
#
# Reset kernel coverage.
#
# Die on error.
#
sub kernel_reset()
{
local *HANDLE;
my $reset_file;
info("Resetting kernel execution counters\n");
if (-e "$gcov_dir/vmlinux") {
$reset_file = "$gcov_dir/vmlinux";
} elsif (-e "$gcov_dir/reset") {
$reset_file = "$gcov_dir/reset";
} else {
die("ERROR: no reset control found in $gcov_dir\n");
}
open(HANDLE, ">", $reset_file) or
die("ERROR: cannot write to $reset_file!\n");
print(HANDLE "0");
close(HANDLE);
}
#
# lcov_copy_single(from, to)
#
# Copy single regular file FROM to TO without checking its size. This is
# required to work with special files generated by the kernel
# seq_file-interface.
#
#
sub lcov_copy_single($$)
{
my ($from, $to) = @_;
my $content;
local $/;
local *HANDLE;
open(HANDLE, "<", $from) or die("ERROR: cannot read $from: $!\n");
$content = <HANDLE>;
close(HANDLE);
open(HANDLE, ">", $to) or die("ERROR: cannot write $from: $!\n");
if (defined($content)) {
print(HANDLE $content);
}
close(HANDLE);
}
#
# lcov_find(dir, function, data[, extension, ...)])
#
# Search DIR for files and directories whose name matches PATTERN and run
# FUNCTION for each match. If not pattern is specified, match all names.
#
# FUNCTION has the following prototype:
# function(dir, relative_name, data)
#
# Where:
# dir: the base directory for this search
# relative_name: the name relative to the base directory of this entry
# data: the DATA variable passed to lcov_find
#
sub lcov_find($$$;@)
{
my ($dir, $fn, $data, @pattern) = @_;
my $result;
my $_fn = sub {
my $filename = $File::Find::name;
if (defined($result)) {
return;
}
$filename = abs2rel($filename, $dir);
foreach (@pattern) {
if ($filename =~ /$_/) {
goto ok;
}
}
return;
ok:
$result = &$fn($dir, $filename, $data);
};
if (scalar(@pattern) == 0) {
@pattern = ".*";
}
find( { wanted => $_fn, no_chdir => 1 }, $dir);
return $result;
}
#
# lcov_copy_fn(from, rel, to)
#
# Copy directories, files and links from/rel to to/rel.
#
sub lcov_copy_fn($$$)
{
my ($from, $rel, $to) = @_;
my $absfrom = canonpath(catfile($from, $rel));
my $absto = canonpath(catfile($to, $rel));
if (-d) {
if (! -d $absto) {
mkpath($absto) or
die("ERROR: cannot create directory $absto\n");
chmod(0700, $absto);
}
} elsif (-l) {
# Copy symbolic link
my $link = readlink($absfrom);
if (!defined($link)) {
die("ERROR: cannot read link $absfrom: $!\n");
}
symlink($link, $absto) or
die("ERROR: cannot create link $absto: $!\n");
} else {
lcov_copy_single($absfrom, $absto);
chmod(0600, $absto);
}
return undef;
}
#
# lcov_copy(from, to, subdirs)
#
# Copy all specified SUBDIRS and files from directory FROM to directory TO. For
# regular files, copy file contents without checking its size. This is required
# to work with seq_file-generated files.
#
sub lcov_copy($$;@)
{
my ($from, $to, @subdirs) = @_;
my @pattern;
foreach (@subdirs) {
push(@pattern, "^$_");
}
lcov_find($from, \&lcov_copy_fn, $to, @pattern);
}
#
# lcov_geninfo(directory)
#
# Call geninfo for the specified directory and with the parameters specified
# at the command line.
#
sub lcov_geninfo(@)
{
my (@dir) = @_;
my @param;
# Capture data
info("Capturing coverage data from ".join(" ", @dir)."\n");
@param = ("$tool_dir/geninfo", @dir);
if ($output_filename)
{
@param = (@param, "--output-filename", $output_filename);
}
if ($test_name)
{
@param = (@param, "--test-name", $test_name);
}
if ($follow)
{
@param = (@param, "--follow");
}
if ($quiet)
{
@param = (@param, "--quiet");
}
if (defined($checksum))
{
if ($checksum)
{
@param = (@param, "--checksum");
}
else
{
@param = (@param, "--no-checksum");
}
}
if ($base_directory)
{
@param = (@param, "--base-directory", $base_directory);
}
if ($no_compat_libtool)
{
@param = (@param, "--no-compat-libtool");
}
elsif ($compat_libtool)
{
@param = (@param, "--compat-libtool");
}
if ($gcov_tool)
{
@param = (@param, "--gcov-tool", $gcov_tool);
}
foreach (@opt_ignore_errors) {
@param = (@param, "--ignore-errors", $_);
}
if ($no_recursion) {
@param = (@param, "--no-recursion");
}
if ($initial)
{
@param = (@param, "--initial");
}
if ($no_markers)
{
@param = (@param, "--no-markers");
}
if ($opt_derive_func_data)
{
@param = (@param, "--derive-func-data");
}
if ($opt_debug)
{
@param = (@param, "--debug");
}
if (defined($opt_external) && $opt_external)
{
@param = (@param, "--external");
}
if (defined($opt_external) && !$opt_external)
{
@param = (@param, "--no-external");
}
if (defined($opt_compat)) {
@param = (@param, "--compat", $opt_compat);
}
if (%opt_rc) {
foreach my $key (keys(%opt_rc)) {
@param = (@param, "--rc", "$key=".$opt_rc{$key});
}
}
if (defined($opt_config_file)) {
@param = (@param, "--config-file", $opt_config_file);
}
foreach (@include_patterns) {
@param = (@param, "--include", $_);
}
foreach (@exclude_patterns) {
@param = (@param, "--exclude", $_);
}
system(@param) and exit($? >> 8);
}
#
# read_file(filename)
#
# Return the contents of the file defined by filename.
#
sub read_file($)
{
my ($filename) = @_;
my $content;
local $\;
local *HANDLE;
open(HANDLE, "<", $filename) || return undef;
$content = <HANDLE>;
close(HANDLE);
return $content;
}
#
# get_package(package_file)
#
# Unpack unprocessed coverage data files from package_file to a temporary
# directory and return directory name, build directory and gcov kernel version
# as found in package.
#
sub get_package($)
{
my ($file) = @_;
my $dir = create_temp_dir();
my $gkv;
my $build;
my $cwd = getcwd();
my $count;
local *HANDLE;
info("Reading package $file:\n");
$file = abs_path($file);
chdir($dir);
open(HANDLE, "-|", "tar xvfz '$file' 2>/dev/null")
or die("ERROR: could not process package $file\n");
$count = 0;
while (<HANDLE>) {
if (/\.da$/ || /\.gcda$/) {
$count++;
}
}
close(HANDLE);
if ($count == 0) {
die("ERROR: no data file found in package $file\n");
}
info(" data directory .......: $dir\n");
$build = read_file("$dir/$pkg_build_file");
if (defined($build)) {
info(" build directory ......: $build\n");
}
$gkv = read_file("$dir/$pkg_gkv_file");
if (defined($gkv)) {
$gkv = int($gkv);
if ($gkv != $GKV_PROC && $gkv != $GKV_SYS) {
die("ERROR: unsupported gcov kernel version found ".
"($gkv)\n");
}
info(" content type .........: kernel data\n");
info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]);
} else {
info(" content type .........: application data\n");
}
info(" data files ...........: $count\n");
chdir($cwd);
return ($dir, $build, $gkv);
}
#
# write_file(filename, $content)
#
# Create a file named filename and write the specified content to it.
#
sub write_file($$)
{
my ($filename, $content) = @_;
local *HANDLE;
open(HANDLE, ">", $filename) || return 0;
print(HANDLE $content);
close(HANDLE) || return 0;
return 1;
}
# count_package_data(filename)
#
# Count the number of coverage data files in the specified package file.
#
sub count_package_data($)
{
my ($filename) = @_;
local *HANDLE;
my $count = 0;
open(HANDLE, "-|", "tar tfz '$filename'") or return undef;
while (<HANDLE>) {
if (/\.da$/ || /\.gcda$/) {
$count++;
}
}
close(HANDLE);
return $count;
}
#
# create_package(package_file, source_directory, build_directory[,
# kernel_gcov_version])
#
# Store unprocessed coverage data files from source_directory to package_file.
#
sub create_package($$$;$)
{
my ($file, $dir, $build, $gkv) = @_;
my $cwd = getcwd();
# Check for availability of tar tool first
system("tar --help > /dev/null")
and die("ERROR: tar command not available\n");
# Print information about the package
info("Creating package $file:\n");
info(" data directory .......: $dir\n");
# Handle build directory
if (defined($build)) {
info(" build directory ......: $build\n");
write_file("$dir/$pkg_build_file", $build)
or die("ERROR: could not write to ".
"$dir/$pkg_build_file\n");
}
# Handle gcov kernel version data
if (defined($gkv)) {
info(" content type .........: kernel data\n");
info(" gcov kernel version ..: %s\n", $GKV_NAME[$gkv]);
write_file("$dir/$pkg_gkv_file", $gkv)
or die("ERROR: could not write to ".
"$dir/$pkg_gkv_file\n");
} else {
info(" content type .........: application data\n");
}
# Create package
$file = abs_path($file);
chdir($dir);
system("tar cfz $file .")
and die("ERROR: could not create package $file\n");
chdir($cwd);
# Remove temporary files
unlink("$dir/$pkg_build_file");
unlink("$dir/$pkg_gkv_file");
# Show number of data files
if (!$quiet) {
my $count = count_package_data($file);
if (defined($count)) {
info(" data files ...........: $count\n");
}
}
}
sub find_link_fn($$$)
{
my ($from, $rel, $filename) = @_;
my $absfile = catfile($from, $rel, $filename);
if (-l $absfile) {
return $absfile;
}
return undef;
}
#
# get_base(dir)
#
# Return (BASE, OBJ), where
# - BASE: is the path to the kernel base directory relative to dir
# - OBJ: is the absolute path to the kernel build directory
#
sub get_base($)
{
my ($dir) = @_;
my $marker = "kernel/gcov/base.gcno";
my $markerfile;
my $sys;
my $obj;
my $link;
$markerfile = lcov_find($dir, \&find_link_fn, $marker);
if (!defined($markerfile)) {
return (undef, undef);
}
# sys base is parent of parent of markerfile.
$sys = abs2rel(dirname(dirname(dirname($markerfile))), $dir);
# obj base is parent of parent of markerfile link target.
$link = readlink($markerfile);
if (!defined($link)) {
die("ERROR: could not read $markerfile\n");
}
$obj = dirname(dirname(dirname($link)));
return ($sys, $obj);
}
#
# apply_base_dir(data_dir, base_dir, build_dir, @directories)
#
# Make entries in @directories relative to data_dir.
#
sub apply_base_dir($$$@)
{
my ($data, $base, $build, @dirs) = @_;
my $dir;
my @result;
foreach $dir (@dirs) {
# Is directory path relative to data directory?
if (-d catdir($data, $dir)) {
push(@result, $dir);
next;
}
# Relative to the auto-detected base-directory?
if (defined($base)) {
if (-d catdir($data, $base, $dir)) {
push(@result, catdir($base, $dir));
next;
}
}
# Relative to the specified base-directory?
if (defined($base_directory)) {
if (file_name_is_absolute($base_directory)) {
$base = abs2rel($base_directory, rootdir());
} else {
$base = $base_directory;
}
if (-d catdir($data, $base, $dir)) {
push(@result, catdir($base, $dir));
next;
}
}
# Relative to the build directory?
if (defined($build)) {
if (file_name_is_absolute($build)) {
$base = abs2rel($build, rootdir());
} else {
$base = $build;
}
if (-d catdir($data, $base, $dir)) {
push(@result, catdir($base, $dir));
next;
}
}
die("ERROR: subdirectory $dir not found\n".
"Please use -b to specify the correct directory\n");
}
return @result;
}
#
# copy_gcov_dir(dir, [@subdirectories])
#
# Create a temporary directory and copy all or, if specified, only some
# subdirectories from dir to that directory. Return the name of the temporary
# directory.
#
sub copy_gcov_dir($;@)
{
my ($data, @dirs) = @_;
my $tempdir = create_temp_dir();
info("Copying data to temporary directory $tempdir\n");
lcov_copy($data, $tempdir, @dirs);
return $tempdir;
}
#
# kernel_capture_initial
#
# Capture initial kernel coverage data, i.e. create a coverage data file from
# static graph files which contains zero coverage data for all instrumented
# lines.
#
sub kernel_capture_initial()
{
my $build;
my $source;
my @params;
if (defined($base_directory)) {
$build = $base_directory;
$source = "specified";
} else {
(undef, $build) = get_base($gcov_dir);
if (!defined($build)) {
die("ERROR: could not auto-detect build directory.\n".
"Please use -b to specify the build directory\n");
}
$source = "auto-detected";
}
info("Using $build as kernel build directory ($source)\n");
# Build directory needs to be passed to geninfo
$base_directory = $build;
if (@kernel_directory) {
foreach my $dir (@kernel_directory) {
push(@params, "$build/$dir");
}
} else {
push(@params, $build);
}
lcov_geninfo(@params);
}
#
# kernel_capture_from_dir(directory, gcov_kernel_version, build)
#
# Perform the actual kernel coverage capturing from the specified directory
# assuming that the data was copied from the specified gcov kernel version.
#
sub kernel_capture_from_dir($$$)
{
my ($dir, $gkv, $build) = @_;
# Create package or coverage file
if (defined($to_package)) {
create_package($to_package, $dir, $build, $gkv);
} else {
# Build directory needs to be passed to geninfo
$base_directory = $build;
lcov_geninfo($dir);
}
}
#
# adjust_kernel_dir(dir, build)
#
# Adjust directories specified with -k so that they point to the directory
# relative to DIR. Return the build directory if specified or the auto-
# detected build-directory.
#
sub adjust_kernel_dir($$)
{
my ($dir, $build) = @_;
my ($sys_base, $build_auto) = get_base($dir);
if (!defined($build)) {
$build = $build_auto;
}
if (!defined($build)) {
die("ERROR: could not auto-detect build directory.\n".
"Please use -b to specify the build directory\n");
}
# Make @kernel_directory relative to sysfs base
if (@kernel_directory) {
@kernel_directory = apply_base_dir($dir, $sys_base, $build,
@kernel_directory);
}
return $build;
}
sub kernel_capture()
{
my $data_dir;
my $build = $base_directory;
if ($gcov_gkv == $GKV_SYS) {
$build = adjust_kernel_dir($gcov_dir, $build);
}
$data_dir = copy_gcov_dir($gcov_dir, @kernel_directory);
kernel_capture_from_dir($data_dir, $gcov_gkv, $build);
}
#
# link_data_cb(datadir, rel, graphdir)
#
# Create symbolic link in GRAPDIR/REL pointing to DATADIR/REL.
#
sub link_data_cb($$$)
{
my ($datadir, $rel, $graphdir) = @_;
my $absfrom = catfile($datadir, $rel);
my $absto = catfile($graphdir, $rel);
my $base;
my $dir;
if (-e $absto) {
die("ERROR: could not create symlink at $absto: ".
"File already exists!\n");
}
if (-l $absto) {
# Broken link - possibly from an interrupted earlier run
unlink($absto);
}
# Check for graph file
$base = $absto;
$base =~ s/\.(gcda|da)$//;
if (! -e $base.".gcno" && ! -e $base.".bbg" && ! -e $base.".bb") {
die("ERROR: No graph file found for $absfrom in ".
dirname($base)."!\n");
}
symlink($absfrom, $absto) or
die("ERROR: could not create symlink at $absto: $!\n");
}
#
# unlink_data_cb(datadir, rel, graphdir)
#
# Remove symbolic link from GRAPHDIR/REL to DATADIR/REL.
#
sub unlink_data_cb($$$)
{
my ($datadir, $rel, $graphdir) = @_;
my $absfrom = catfile($datadir, $rel);
my $absto = catfile($graphdir, $rel);
my $target;
return if (!-l $absto);
$target = readlink($absto);
return if (!defined($target) || $target ne $absfrom);
unlink($absto) or
warn("WARNING: could not remove symlink $absto: $!\n");
}
#
# link_data(datadir, graphdir, create)
#
# If CREATE is non-zero, create symbolic links in GRAPHDIR for data files
# found in DATADIR. Otherwise remove link in GRAPHDIR.
#
sub link_data($$$)
{
my ($datadir, $graphdir, $create) = @_;
$datadir = abs_path($datadir);
$graphdir = abs_path($graphdir);
if ($create) {
lcov_find($datadir, \&link_data_cb, $graphdir, '\.gcda$',
'\.da$');
} else {
lcov_find($datadir, \&unlink_data_cb, $graphdir, '\.gcda$',
'\.da$');
}
}
#
# find_graph_cb(datadir, rel, count_ref)
#
# Count number of files found.
#
sub find_graph_cb($$$)
{
my ($dir, $rel, $count_ref) = @_;
($$count_ref)++;
}
#
# find_graph(dir)
#
# Search DIR for a graph file. Return non-zero if one was found, zero otherwise.
#
sub find_graph($)
{
my ($dir) = @_;
my $count = 0;
lcov_find($dir, \&find_graph_cb, \$count, '\.gcno$', '\.bb$', '\.bbg$');
return $count > 0 ? 1 : 0;
}
#
# package_capture()
#
# Capture coverage data from a package of unprocessed coverage data files
# as generated by lcov --to-package.
#
sub package_capture()
{
my $dir;
my $build;
my $gkv;
($dir, $build, $gkv) = get_package($from_package);
# Check for build directory
if (defined($base_directory)) {
if (defined($build)) {
info("Using build directory specified by -b.\n");
}
$build = $base_directory;
}
# Do the actual capture
if (defined($gkv)) {
if ($gkv == $GKV_SYS) {
$build = adjust_kernel_dir($dir, $build);
}
if (@kernel_directory) {
$dir = copy_gcov_dir($dir, @kernel_directory);
}
kernel_capture_from_dir($dir, $gkv, $build);
} else {
# Build directory needs to be passed to geninfo
$base_directory = $build;
if (find_graph($dir)) {
# Package contains graph files - collect from there
lcov_geninfo($dir);
} else {
# No graph files found, link data files next to
# graph files
link_data($dir, $base_directory, 1);
lcov_geninfo($base_directory);
link_data($dir, $base_directory, 0);
}
}
}
#
# info(printf_parameter)
#
# Use printf to write PRINTF_PARAMETER to stdout only when the $quiet flag
# is not set.
#
sub info(@)
{
if (!$quiet)
{
# Print info string
if (!$data_stdout)
{
printf(@_)
}
else
{
# Don't interfere with the .info output to STDOUT
printf(STDERR @_);
}
}
}
#
# create_temp_dir()
#
# Create a temporary directory and return its path.
#
# Die on error.
#
sub create_temp_dir()
{
my $dir;
if (defined($tmp_dir)) {
$dir = tempdir(DIR => $tmp_dir, CLEANUP => 1);
} else {
$dir = tempdir(CLEANUP => 1);
}
if (!defined($dir)) {
die("ERROR: cannot create temporary directory\n");
}
push(@temp_dirs, $dir);
return $dir;
}
sub compress_brcount($)
{
my ($brcount) = @_;
my $db;
$db = brcount_to_db($brcount);
return db_to_brcount($db, $brcount);
}
sub get_br_found_and_hit($)
{
my ($brcount) = @_;
my $db;
$db = brcount_to_db($brcount);
return brcount_db_get_found_and_hit($db);
}
#
# read_info_file(info_filename)
#
# Read in the contents of the .info file specified by INFO_FILENAME. Data will
# be returned as a reference to a hash containing the following mappings:
#
# %result: for each filename found in file -> \%data
#
# %data: "test" -> \%testdata
# "sum" -> \%sumcount
# "func" -> \%funcdata
# "found" -> $lines_found (number of instrumented lines found in file)
# "hit" -> $lines_hit (number of executed lines in file)
# "f_found" -> $fn_found (number of instrumented functions found in file)
# "f_hit" -> $fn_hit (number of executed functions in file)
# "b_found" -> $br_found (number of instrumented branches found in file)
# "b_hit" -> $br_hit (number of executed branches in file)
# "check" -> \%checkdata
# "testfnc" -> \%testfncdata
# "sumfnc" -> \%sumfnccount
# "testbr" -> \%testbrdata
# "sumbr" -> \%sumbrcount
#
# %testdata : name of test affecting this file -> \%testcount
# %testfncdata: name of test affecting this file -> \%testfnccount
# %testbrdata: name of test affecting this file -> \%testbrcount
#
# %testcount : line number -> execution count for a single test
# %testfnccount: function name -> execution count for a single test
# %testbrcount : line number -> branch coverage data for a single test
# %sumcount : line number -> execution count for all tests
# %sumfnccount : function name -> execution count for all tests
# %sumbrcount : line number -> branch coverage data for all tests
# %funcdata : function name -> line number
# %checkdata : line number -> checksum of source code line
# $brdata : text "block,branch,taken:..."
#
# Note that .info file sections referring to the same file and test name
# will automatically be combined by adding all execution counts.
#
# Note that if INFO_FILENAME ends with ".gz", it is assumed that the file
# is compressed using GZIP. If available, GUNZIP will be used to decompress
# this file.
#
# Die on error.
#
sub read_info_file($)
{
my $tracefile = $_[0]; # Name of tracefile
my %result; # Resulting hash: file -> data
my $data; # Data handle for current entry
my $testdata; # " "
my $testcount; # " "
my $sumcount; # " "
my $funcdata; # " "
my $checkdata; # " "
my $testfncdata;
my $testfnccount;
my $sumfnccount;
my $testbrdata;
my $testbrcount;
my $sumbrcount;
my $line; # Current line read from .info file
my $testname; # Current test name
my $filename; # Current filename
my $hitcount; # Count for lines hit
my $count; # Execution count of current line
my $negative; # If set, warn about negative counts
my $changed_testname; # If set, warn about changed testname
my $line_checksum; # Checksum of current line
local *INFO_HANDLE; # Filehandle for .info file
info("Reading tracefile $tracefile\n");
# Check if file exists and is readable
stat($_[0]);
if (!(-r _))
{
die("ERROR: cannot read file $_[0]!\n");
}
# Check if this is really a plain file
if (!(-f _))
{
die("ERROR: not a plain file: $_[0]!\n");
}
# Check for .gz extension
if ($_[0] =~ /\.gz$/)
{
# Check for availability of GZIP tool
system_no_output(1, "gunzip" ,"-h")
and die("ERROR: gunzip command not available!\n");
# Check integrity of compressed file
system_no_output(1, "gunzip", "-t", $_[0])
and die("ERROR: integrity check failed for ".
"compressed file $_[0]!\n");
# Open compressed file
open(INFO_HANDLE, "-|", "gunzip -c '$_[0]'")
or die("ERROR: cannot start gunzip to decompress ".
"file $_[0]!\n");
}
else
{
# Open decompressed file
open(INFO_HANDLE, "<", $_[0])
or die("ERROR: cannot read file $_[0]!\n");
}
$testname = "";
while (<INFO_HANDLE>)
{
chomp($_);
$line = $_;
# Switch statement
foreach ($line)
{
/^TN:([^,]*)(,diff)?/ && do
{
# Test name information found
$testname = defined($1) ? $1 : "";
if ($testname =~ s/\W/_/g)
{
$changed_testname = 1;
}
$testname .= $2 if (defined($2));
last;
};
/^[SK]F:(.*)/ && do
{
# Filename information found
# Retrieve data for new entry
$filename = $1;
$data = $result{$filename};
($testdata, $sumcount, $funcdata, $checkdata,
$testfncdata, $sumfnccount, $testbrdata,
$sumbrcount) =
get_info_entry($data);
if (defined($testname))
{
$testcount = $testdata->{$testname};
$testfnccount = $testfncdata->{$testname};
$testbrcount = $testbrdata->{$testname};
}
else
{
$testcount = {};
$testfnccount = {};
$testbrcount = {};
}
last;
};
/^DA:(\d+),(-?\d+)(,[^,\s]+)?/ && do
{
# Fix negative counts
$count = $2 < 0 ? 0 : $2;
if ($2 < 0)
{
$negative = 1;
}
# Execution count found, add to structure
# Add summary counts
$sumcount->{$1} += $count;
# Add test-specific counts
if (defined($testname))
{
$testcount->{$1} += $count;
}
# Store line checksum if available
if (defined($3))
{
$line_checksum = substr($3, 1);
# Does it match a previous definition
if (defined($checkdata->{$1}) &&
($checkdata->{$1} ne
$line_checksum))
{
die("ERROR: checksum mismatch ".
"at $filename:$1\n");
}
$checkdata->{$1} = $line_checksum;
}
last;
};
/^FN:(\d+),([^,]+)/ && do
{
last if (!$func_coverage);
# Function data found, add to structure
$funcdata->{$2} = $1;
# Also initialize function call data
if (!defined($sumfnccount->{$2})) {
$sumfnccount->{$2} = 0;
}
if (defined($testname))
{
if (!defined($testfnccount->{$2})) {
$testfnccount->{$2} = 0;
}
}
last;
};
/^FNDA:(\d+),([^,]+)/ && do
{
last if (!$func_coverage);
# Function call count found, add to structure
# Add summary counts
$sumfnccount->{$2} += $1;
# Add test-specific counts
if (defined($testname))
{
$testfnccount->{$2} += $1;
}
last;
};
/^BRDA:(\d+),(\d+),(\d+),(\d+|-)/ && do {
# Branch coverage data found
my ($line, $block, $branch, $taken) =
($1, $2, $3, $4);
last if (!$br_coverage);
$sumbrcount->{$line} .=
"$block,$branch,$taken:";
# Add test-specific counts
if (defined($testname)) {
$testbrcount->{$line} .=
"$block,$branch,$taken:";
}
last;
};
/^end_of_record/ && do
{
# Found end of section marker
if ($filename)
{
# Store current section data
if (defined($testname))
{
$testdata->{$testname} =
$testcount;
$testfncdata->{$testname} =
$testfnccount;
$testbrdata->{$testname} =
$testbrcount;
}
set_info_entry($data, $testdata,
$sumcount, $funcdata,
$checkdata, $testfncdata,
$sumfnccount,
$testbrdata,
$sumbrcount);
$result{$filename} = $data;
last;
}
};
# default
last;
}
}
close(INFO_HANDLE);
# Calculate hit and found values for lines and functions of each file
foreach $filename (keys(%result))
{
$data = $result{$filename};
($testdata, $sumcount, undef, undef, $testfncdata,
$sumfnccount, $testbrdata, $sumbrcount) =
get_info_entry($data);
# Filter out empty files
if (scalar(keys(%{$sumcount})) == 0)
{
delete($result{$filename});
next;
}
# Filter out empty test cases
foreach $testname (keys(%{$testdata}))
{
if (!defined($testdata->{$testname}) ||
scalar(keys(%{$testdata->{$testname}})) == 0)
{
delete($testdata->{$testname});
delete($testfncdata->{$testname});
}
}
$data->{"found"} = scalar(keys(%{$sumcount}));
$hitcount = 0;
foreach (keys(%{$sumcount}))
{
if ($sumcount->{$_} > 0) { $hitcount++; }
}
$data->{"hit"} = $hitcount;
# Get found/hit values for function call data
$data->{"f_found"} = scalar(keys(%{$sumfnccount}));
$hitcount = 0;
foreach (keys(%{$sumfnccount})) {
if ($sumfnccount->{$_} > 0) {
$hitcount++;
}
}
$data->{"f_hit"} = $hitcount;
# Combine branch data for the same branches
(undef, $data->{"b_found"}, $data->{"b_hit"}) =
compress_brcount($sumbrcount);
foreach $testname (keys(%{$testbrdata})) {
compress_brcount($testbrdata->{$testname});
}
}
if (scalar(keys(%result)) == 0)
{
die("ERROR: no valid records found in tracefile $tracefile\n");
}
if ($negative)
{
warn("WARNING: negative counts found in tracefile ".
"$tracefile\n");
}
if ($changed_testname)
{
warn("WARNING: invalid characters removed from testname in ".
"tracefile $tracefile\n");
}
return(\%result);
}
#
# get_info_entry(hash_ref)
#
# Retrieve data from an entry of the structure generated by read_info_file().
# Return a list of references to hashes:
# (test data hash ref, sum count hash ref, funcdata hash ref, checkdata hash
# ref, testfncdata hash ref, sumfnccount hash ref, testbrdata hash ref,
# sumbrcount hash ref, lines found, lines hit, functions found,
# functions hit, branches found, branches hit)
#
sub get_info_entry($)
{
my $testdata_ref = $_[0]->{"test"};
my $sumcount_ref = $_[0]->{"sum"};
my $funcdata_ref = $_[0]->{"func"};
my $checkdata_ref = $_[0]->{"check"};
my $testfncdata = $_[0]->{"testfnc"};
my $sumfnccount = $_[0]->{"sumfnc"};
my $testbrdata = $_[0]->{"testbr"};
my $sumbrcount = $_[0]->{"sumbr"};
my $lines_found = $_[0]->{"found"};
my $lines_hit = $_[0]->{"hit"};
my $f_found = $_[0]->{"f_found"};
my $f_hit = $_[0]->{"f_hit"};
my $br_found = $_[0]->{"b_found"};
my $br_hit = $_[0]->{"b_hit"};
return ($testdata_ref, $sumcount_ref, $funcdata_ref, $checkdata_ref,
$testfncdata, $sumfnccount, $testbrdata, $sumbrcount,
$lines_found, $lines_hit, $f_found, $f_hit,
$br_found, $br_hit);
}
#
# set_info_entry(hash_ref, testdata_ref, sumcount_ref, funcdata_ref,
# checkdata_ref, testfncdata_ref, sumfcncount_ref,
# testbrdata_ref, sumbrcount_ref[,lines_found,
# lines_hit, f_found, f_hit, $b_found, $b_hit])
#
# Update the hash referenced by HASH_REF with the provided data references.
#
sub set_info_entry($$$$$$$$$;$$$$$$)
{
my $data_ref = $_[0];
$data_ref->{"test"} = $_[1];
$data_ref->{"sum"} = $_[2];
$data_ref->{"func"} = $_[3];
$data_ref->{"check"} = $_[4];
$data_ref->{"testfnc"} = $_[5];
$data_ref->{"sumfnc"} = $_[6];
$data_ref->{"testbr"} = $_[7];
$data_ref->{"sumbr"} = $_[8];
if (defined($_[9])) { $data_ref->{"found"} = $_[9]; }
if (defined($_[10])) { $data_ref->{"hit"} = $_[10]; }
if (defined($_[11])) { $data_ref->{"f_found"} = $_[11]; }
if (defined($_[12])) { $data_ref->{"f_hit"} = $_[12]; }
if (defined($_[13])) { $data_ref->{"b_found"} = $_[13]; }
if (defined($_[14])) { $data_ref->{"b_hit"} = $_[14]; }
}
#
# add_counts(data1_ref, data2_ref)
#
# DATA1_REF and DATA2_REF are references to hashes containing a mapping
#
# line number -> execution count
#
# Return a list (RESULT_REF, LINES_FOUND, LINES_HIT) where RESULT_REF
# is a reference to a hash containing the combined mapping in which
# execution counts are added.
#
sub add_counts($$)
{
my $data1_ref = $_[0]; # Hash 1
my $data2_ref = $_[1]; # Hash 2
my %result; # Resulting hash
my $line; # Current line iteration scalar
my $data1_count; # Count of line in hash1
my $data2_count; # Count of line in hash2
my $found = 0; # Total number of lines found
my $hit = 0; # Number of lines with a count > 0
foreach $line (keys(%$data1_ref))
{
$data1_count = $data1_ref->{$line};
$data2_count = $data2_ref->{$line};
# Add counts if present in both hashes
if (defined($data2_count)) { $data1_count += $data2_count; }
# Store sum in %result
$result{$line} = $data1_count;
$found++;
if ($data1_count > 0) { $hit++; }
}
# Add lines unique to data2_ref
foreach $line (keys(%$data2_ref))
{
# Skip lines already in data1_ref
if (defined($data1_ref->{$line})) { next; }
# Copy count from data2_ref
$result{$line} = $data2_ref->{$line};
$found++;
if ($result{$line} > 0) { $hit++; }
}
return (\%result, $found, $hit);
}
#
# merge_checksums(ref1, ref2, filename)
#
# REF1 and REF2 are references to hashes containing a mapping
#
# line number -> checksum
#
# Merge checksum lists defined in REF1 and REF2 and return reference to
# resulting hash. Die if a checksum for a line is defined in both hashes
# but does not match.
#
sub merge_checksums($$$)
{
my $ref1 = $_[0];
my $ref2 = $_[1];
my $filename = $_[2];
my %result;
my $line;
foreach $line (keys(%{$ref1}))
{
if (defined($ref2->{$line}) &&
($ref1->{$line} ne $ref2->{$line}))
{
die("ERROR: checksum mismatch at $filename:$line\n");
}
$result{$line} = $ref1->{$line};
}
foreach $line (keys(%{$ref2}))
{
$result{$line} = $ref2->{$line};
}
return \%result;
}
#
# merge_func_data(funcdata1, funcdata2, filename)
#
sub merge_func_data($$$)
{
my ($funcdata1, $funcdata2, $filename) = @_;
my %result;
my $func;
if (defined($funcdata1)) {
%result = %{$funcdata1};
}
foreach $func (keys(%{$funcdata2})) {
my $line1 = $result{$func};
my $line2 = $funcdata2->{$func};
if (defined($line1) && ($line1 != $line2)) {
warn("WARNING: function data mismatch at ".
"$filename:$line2\n");
next;
}
$result{$func} = $line2;
}
return \%result;
}
#
# add_fnccount(fnccount1, fnccount2)
#
# Add function call count data. Return list (fnccount_added, f_found, f_hit)
#
sub add_fnccount($$)
{
my ($fnccount1, $fnccount2) = @_;
my %result;
my $f_found;
my $f_hit;
my $function;
if (defined($fnccount1)) {
%result = %{$fnccount1};
}
foreach $function (keys(%{$fnccount2})) {
$result{$function} += $fnccount2->{$function};
}
$f_found = scalar(keys(%result));
$f_hit = 0;
foreach $function (keys(%result)) {
if ($result{$function} > 0) {
$f_hit++;
}
}
return (\%result, $f_found, $f_hit);
}
#
# add_testfncdata(testfncdata1, testfncdata2)
#
# Add function call count data for several tests. Return reference to
# added_testfncdata.
#
sub add_testfncdata($$)
{
my ($testfncdata1, $testfncdata2) = @_;
my %result;
my $testname;
foreach $testname (keys(%{$testfncdata1})) {
if (defined($testfncdata2->{$testname})) {
my $fnccount;
# Function call count data for this testname exists
# in both data sets: merge
($fnccount) = add_fnccount(
$testfncdata1->{$testname},
$testfncdata2->{$testname});
$result{$testname} = $fnccount;
next;
}
# Function call count data for this testname is unique to
# data set 1: copy
$result{$testname} = $testfncdata1->{$testname};
}
# Add count data for testnames unique to data set 2
foreach $testname (keys(%{$testfncdata2})) {
if (!defined($result{$testname})) {
$result{$testname} = $testfncdata2->{$testname};
}
}
return \%result;
}
#
# brcount_to_db(brcount)
#
# Convert brcount data to the following format:
#
# db: line number -> block hash
# block hash: block number -> branch hash
# branch hash: branch number -> taken value
#
sub brcount_to_db($)
{
my ($brcount) = @_;
my $line;
my $db = {};
# Add branches to database
foreach $line (keys(%{$brcount})) {
my $brdata = $brcount->{$line};
foreach my $entry (split(/:/, $brdata)) {
my ($block, $branch, $taken) = split(/,/, $entry);
my $old = $db->{$line}->{$block}->{$branch};
if (!defined($old) || $old eq "-") {
$old = $taken;
} elsif ($taken ne "-") {
$old += $taken;
}
$db->{$line}->{$block}->{$branch} = $old;
}
}
return $db;
}
#
# db_to_brcount(db[, brcount])
#
# Convert branch coverage data back to brcount format. If brcount is specified,
# the converted data is directly inserted in brcount.
#
sub db_to_brcount($;$)
{
my ($db, $brcount) = @_;
my $line;
my $br_found = 0;
my $br_hit = 0;
# Convert database back to brcount format
foreach $line (sort({$a <=> $b} keys(%{$db}))) {
my $ldata = $db->{$line};
my $brdata;
my $block;
foreach $block (sort({$a <=> $b} keys(%{$ldata}))) {
my $bdata = $ldata->{$block};
my $branch;
foreach $branch (sort({$a <=> $b} keys(%{$bdata}))) {
my $taken = $bdata->{$branch};
$br_found++;
$br_hit++ if ($taken ne "-" && $taken > 0);
$brdata .= "$block,$branch,$taken:";
}
}
$brcount->{$line} = $brdata;
}
return ($brcount, $br_found, $br_hit);
}
#
# brcount_db_combine(db1, db2, op)
#
# db1 := db1 op db2, where
# db1, db2: brcount data as returned by brcount_to_db
# op: one of $BR_ADD and BR_SUB
#
sub brcount_db_combine($$$)
{
my ($db1, $db2, $op) = @_;
foreach my $line (keys(%{$db2})) {
my $ldata = $db2->{$line};
foreach my $block (keys(%{$ldata})) {
my $bdata = $ldata->{$block};
foreach my $branch (keys(%{$bdata})) {
my $taken = $bdata->{$branch};
my $new = $db1->{$line}->{$block}->{$branch};
if (!defined($new) || $new eq "-") {
$new = $taken;
} elsif ($taken ne "-") {
if ($op == $BR_ADD) {
$new += $taken;
} elsif ($op == $BR_SUB) {
$new -= $taken;
$new = 0 if ($new < 0);
}
}
$db1->{$line}->{$block}->{$branch} = $new;
}
}
}
}
#
# brcount_db_get_found_and_hit(db)
#
# Return (br_found, br_hit) for db.
#
sub brcount_db_get_found_and_hit($)
{
my ($db) = @_;
my ($br_found , $br_hit) = (0, 0);
foreach my $line (keys(%{$db})) {
my $ldata = $db->{$line};
foreach my $block (keys(%{$ldata})) {
my $bdata = $ldata->{$block};
foreach my $branch (keys(%{$bdata})) {
my $taken = $bdata->{$branch};
$br_found++;
$br_hit++ if ($taken ne "-" && $taken > 0);
}
}
}
return ($br_found, $br_hit);
}
# combine_brcount(brcount1, brcount2, type, inplace)
#
# If add is BR_ADD, add branch coverage data and return list brcount_added.
# If add is BR_SUB, subtract the taken values of brcount2 from brcount1 and
# return brcount_sub. If inplace is set, the result is inserted into brcount1.
#
sub combine_brcount($$$;$)
{
my ($brcount1, $brcount2, $type, $inplace) = @_;
my ($db1, $db2);
$db1 = brcount_to_db($brcount1);
$db2 = brcount_to_db($brcount2);
brcount_db_combine($db1, $db2, $type);
return db_to_brcount($db1, $inplace ? $brcount1 : undef);
}
#
# add_testbrdata(testbrdata1, testbrdata2)
#
# Add branch coverage data for several tests. Return reference to
# added_testbrdata.
#
sub add_testbrdata($$)
{
my ($testbrdata1, $testbrdata2) = @_;
my %result;
my $testname;
foreach $testname (keys(%{$testbrdata1})) {
if (defined($testbrdata2->{$testname})) {
my $brcount;
# Branch coverage data for this testname exists
# in both data sets: add
($brcount) = combine_brcount(
$testbrdata1->{$testname},
$testbrdata2->{$testname}, $BR_ADD);
$result{$testname} = $brcount;
next;
}
# Branch coverage data for this testname is unique to
# data set 1: copy
$result{$testname} = $testbrdata1->{$testname};
}
# Add count data for testnames unique to data set 2
foreach $testname (keys(%{$testbrdata2})) {
if (!defined($result{$testname})) {
$result{$testname} = $testbrdata2->{$testname};
}
}
return \%result;
}
#
# combine_info_entries(entry_ref1, entry_ref2, filename)
#
# Combine .info data entry hashes referenced by ENTRY_REF1 and ENTRY_REF2.
# Return reference to resulting hash.
#
sub combine_info_entries($$$)
{
my $entry1 = $_[0]; # Reference to hash containing first entry
my $testdata1;
my $sumcount1;
my $funcdata1;
my $checkdata1;
my $testfncdata1;
my $sumfnccount1;
my $testbrdata1;
my $sumbrcount1;
my $entry2 = $_[1]; # Reference to hash containing second entry
my $testdata2;
my $sumcount2;
my $funcdata2;
my $checkdata2;
my $testfncdata2;
my $sumfnccount2;
my $testbrdata2;
my $sumbrcount2;
my %result; # Hash containing combined entry
my %result_testdata;
my $result_sumcount = {};
my $result_funcdata;
my $result_testfncdata;
my $result_sumfnccount;
my $result_testbrdata;
my $result_sumbrcount;
my $lines_found;
my $lines_hit;
my $f_found;
my $f_hit;
my $br_found;
my $br_hit;
my $testname;
my $filename = $_[2];
# Retrieve data
($testdata1, $sumcount1, $funcdata1, $checkdata1, $testfncdata1,
$sumfnccount1, $testbrdata1, $sumbrcount1) = get_info_entry($entry1);
($testdata2, $sumcount2, $funcdata2, $checkdata2, $testfncdata2,
$sumfnccount2, $testbrdata2, $sumbrcount2) = get_info_entry($entry2);
# Merge checksums
$checkdata1 = merge_checksums($checkdata1, $checkdata2, $filename);
# Combine funcdata
$result_funcdata = merge_func_data($funcdata1, $funcdata2, $filename);
# Combine function call count data
$result_testfncdata = add_testfncdata($testfncdata1, $testfncdata2);
($result_sumfnccount, $f_found, $f_hit) =
add_fnccount($sumfnccount1, $sumfnccount2);
# Combine branch coverage data
$result_testbrdata = add_testbrdata($testbrdata1, $testbrdata2);
($result_sumbrcount, $br_found, $br_hit) =
combine_brcount($sumbrcount1, $sumbrcount2, $BR_ADD);
# Combine testdata
foreach $testname (keys(%{$testdata1}))
{
if (defined($testdata2->{$testname}))
{
# testname is present in both entries, requires
# combination
($result_testdata{$testname}) =
add_counts($testdata1->{$testname},
$testdata2->{$testname});
}
else
{
# testname only present in entry1, add to result
$result_testdata{$testname} = $testdata1->{$testname};
}
# update sum count hash
($result_sumcount, $lines_found, $lines_hit) =
add_counts($result_sumcount,
$result_testdata{$testname});
}
foreach $testname (keys(%{$testdata2}))
{
# Skip testnames already covered by previous iteration
if (defined($testdata1->{$testname})) { next; }
# testname only present in entry2, add to result hash
$result_testdata{$testname} = $testdata2->{$testname};
# update sum count hash
($result_sumcount, $lines_found, $lines_hit) =
add_counts($result_sumcount,
$result_testdata{$testname});
}
# Calculate resulting sumcount
# Store result
set_info_entry(\%result, \%result_testdata, $result_sumcount,
$result_funcdata, $checkdata1, $result_testfncdata,
$result_sumfnccount, $result_testbrdata,
$result_sumbrcount, $lines_found, $lines_hit,
$f_found, $f_hit, $br_found, $br_hit);
return(\%result);
}
#
# combine_info_files(info_ref1, info_ref2)
#
# Combine .info data in hashes referenced by INFO_REF1 and INFO_REF2. Return
# reference to resulting hash.
#
sub combine_info_files($$)
{
my %hash1 = %{$_[0]};
my %hash2 = %{$_[1]};
my $filename;
foreach $filename (keys(%hash2))
{
if ($hash1{$filename})
{
# Entry already exists in hash1, combine them
$hash1{$filename} =
combine_info_entries($hash1{$filename},
$hash2{$filename},
$filename);
}
else
{
# Entry is unique in both hashes, simply add to
# resulting hash
$hash1{$filename} = $hash2{$filename};
}
}
return(\%hash1);
}
#
# add_traces()
#
sub add_traces()
{
my $total_trace;
my $current_trace;
my $tracefile;
my @result;
local *INFO_HANDLE;
info("Combining tracefiles.\n");
foreach $tracefile (@add_tracefile)
{
$current_trace = read_info_file($tracefile);
if ($total_trace)
{
$total_trace = combine_info_files($total_trace,
$current_trace);
}
else
{
$total_trace = $current_trace;
}
}
# Write combined data
if (!$data_stdout)
{
info("Writing data to $output_filename\n");
open(INFO_HANDLE, ">", $output_filename)
or die("ERROR: cannot write to $output_filename!\n");
@result = write_info_file(*INFO_HANDLE, $total_trace);
close(*INFO_HANDLE);
}
else
{
@result = write_info_file(*STDOUT, $total_trace);
}
return @result;
}
#
# write_info_file(filehandle, data)
#
sub write_info_file(*$)
{
local *INFO_HANDLE = $_[0];
my %data = %{$_[1]};
my $source_file;
my $entry;
my $testdata;
my $sumcount;
my $funcdata;
my $checkdata;
my $testfncdata;
my $sumfnccount;
my $testbrdata;
my $sumbrcount;
my $testname;
my $line;
my $func;
my $testcount;
my $testfnccount;
my $testbrcount;
my $found;
my $hit;
my $f_found;
my $f_hit;
my $br_found;
my $br_hit;
my $ln_total_found = 0;
my $ln_total_hit = 0;
my $fn_total_found = 0;
my $fn_total_hit = 0;
my $br_total_found = 0;
my $br_total_hit = 0;
foreach $source_file (sort(keys(%data)))
{
$entry = $data{$source_file};
($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
$sumfnccount, $testbrdata, $sumbrcount, $found, $hit,
$f_found, $f_hit, $br_found, $br_hit) =
get_info_entry($entry);
# Add to totals
$ln_total_found += $found;
$ln_total_hit += $hit;
$fn_total_found += $f_found;
$fn_total_hit += $f_hit;
$br_total_found += $br_found;
$br_total_hit += $br_hit;
foreach $testname (sort(keys(%{$testdata})))
{
$testcount = $testdata->{$testname};
$testfnccount = $testfncdata->{$testname};
$testbrcount = $testbrdata->{$testname};
$found = 0;
$hit = 0;
print(INFO_HANDLE "TN:$testname\n");
print(INFO_HANDLE "SF:$source_file\n");
# Write function related data
foreach $func (
sort({$funcdata->{$a} <=> $funcdata->{$b}}
keys(%{$funcdata})))
{
print(INFO_HANDLE "FN:".$funcdata->{$func}.
",$func\n");
}
foreach $func (keys(%{$testfnccount})) {
print(INFO_HANDLE "FNDA:".
$testfnccount->{$func}.
",$func\n");
}
($f_found, $f_hit) =
get_func_found_and_hit($testfnccount);
print(INFO_HANDLE "FNF:$f_found\n");
print(INFO_HANDLE "FNH:$f_hit\n");
# Write branch related data
$br_found = 0;
$br_hit = 0;
foreach $line (sort({$a <=> $b}
keys(%{$testbrcount}))) {
my $brdata = $testbrcount->{$line};
foreach my $brentry (split(/:/, $brdata)) {
my ($block, $branch, $taken) =
split(/,/, $brentry);
print(INFO_HANDLE "BRDA:$line,$block,".
"$branch,$taken\n");
$br_found++;
$br_hit++ if ($taken ne '-' &&
$taken > 0);
}
}
if ($br_found > 0) {
print(INFO_HANDLE "BRF:$br_found\n");
print(INFO_HANDLE "BRH:$br_hit\n");
}
# Write line related data
foreach $line (sort({$a <=> $b} keys(%{$testcount})))
{
print(INFO_HANDLE "DA:$line,".
$testcount->{$line}.
(defined($checkdata->{$line}) &&
$checksum ?
",".$checkdata->{$line} : "")."\n");
$found++;
if ($testcount->{$line} > 0)
{
$hit++;
}
}
print(INFO_HANDLE "LF:$found\n");
print(INFO_HANDLE "LH:$hit\n");
print(INFO_HANDLE "end_of_record\n");
}
}
return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit,
$br_total_found, $br_total_hit);
}
#
# transform_pattern(pattern)
#
# Transform shell wildcard expression to equivalent Perl regular expression.
# Return transformed pattern.
#
sub transform_pattern($)
{
my $pattern = $_[0];
# Escape special chars
$pattern =~ s/\\/\\\\/g;
$pattern =~ s/\//\\\//g;
$pattern =~ s/\^/\\\^/g;
$pattern =~ s/\$/\\\$/g;
$pattern =~ s/\(/\\\(/g;
$pattern =~ s/\)/\\\)/g;
$pattern =~ s/\[/\\\[/g;
$pattern =~ s/\]/\\\]/g;
$pattern =~ s/\{/\\\{/g;
$pattern =~ s/\}/\\\}/g;
$pattern =~ s/\./\\\./g;
$pattern =~ s/\,/\\\,/g;
$pattern =~ s/\|/\\\|/g;
$pattern =~ s/\+/\\\+/g;
$pattern =~ s/\!/\\\!/g;
# Transform ? => (.) and * => (.*)
$pattern =~ s/\*/\(\.\*\)/g;
$pattern =~ s/\?/\(\.\)/g;
return $pattern;
}
#
# extract()
#
sub extract()
{
my $data = read_info_file($extract);
my $filename;
my $keep;
my $pattern;
my @pattern_list;
my $extracted = 0;
my @result;
local *INFO_HANDLE;
# Need perlreg expressions instead of shell pattern
@pattern_list = map({ transform_pattern($_); } @ARGV);
# Filter out files which do not match any pattern
foreach $filename (sort(keys(%{$data})))
{
$keep = 0;
foreach $pattern (@pattern_list)
{
$keep ||= ($filename =~ (/^$pattern$/));
}
if (!$keep)
{
delete($data->{$filename});
}
else
{
info("Extracting $filename\n"),
$extracted++;
}
}
# Write extracted data
if (!$data_stdout)
{
info("Extracted $extracted files\n");
info("Writing data to $output_filename\n");
open(INFO_HANDLE, ">", $output_filename)
or die("ERROR: cannot write to $output_filename!\n");
@result = write_info_file(*INFO_HANDLE, $data);
close(*INFO_HANDLE);
}
else
{
@result = write_info_file(*STDOUT, $data);
}
return @result;
}
#
# remove()
#
sub remove()
{
my $data = read_info_file($remove);
my $filename;
my $match_found;
my $pattern;
my @pattern_list;
my $removed = 0;
my @result;
local *INFO_HANDLE;
# Need perlreg expressions instead of shell pattern
@pattern_list = map({ transform_pattern($_); } @ARGV);
# Filter out files that match the pattern
foreach $filename (sort(keys(%{$data})))
{
$match_found = 0;
foreach $pattern (@pattern_list)
{
$match_found ||= ($filename =~ (/^$pattern$/));
}
if ($match_found)
{
delete($data->{$filename});
info("Removing $filename\n"),
$removed++;
}
}
# Write data
if (!$data_stdout)
{
info("Deleted $removed files\n");
info("Writing data to $output_filename\n");
open(INFO_HANDLE, ">", $output_filename)
or die("ERROR: cannot write to $output_filename!\n");
@result = write_info_file(*INFO_HANDLE, $data);
close(*INFO_HANDLE);
}
else
{
@result = write_info_file(*STDOUT, $data);
}
return @result;
}
# get_prefix(max_width, max_percentage_too_long, path_list)
#
# Return a path prefix that satisfies the following requirements:
# - is shared by more paths in path_list than any other prefix
# - the percentage of paths which would exceed the given max_width length
# after applying the prefix does not exceed max_percentage_too_long
#
# If multiple prefixes satisfy all requirements, the longest prefix is
# returned. Return an empty string if no prefix could be found.
sub get_prefix($$@)
{
my ($max_width, $max_long, @path_list) = @_;
my $path;
my $ENTRY_NUM = 0;
my $ENTRY_LONG = 1;
my %prefix;
# Build prefix hash
foreach $path (@path_list) {
my ($v, $d, $f) = splitpath($path);
my @dirs = splitdir($d);
my $p_len = length($path);
my $i;
# Remove trailing '/'
pop(@dirs) if ($dirs[scalar(@dirs) - 1] eq '');
for ($i = 0; $i < scalar(@dirs); $i++) {
my $subpath = catpath($v, catdir(@dirs[0..$i]), '');
my $entry = $prefix{$subpath};
$entry = [ 0, 0 ] if (!defined($entry));
$entry->[$ENTRY_NUM]++;
if (($p_len - length($subpath) - 1) > $max_width) {
$entry->[$ENTRY_LONG]++;
}
$prefix{$subpath} = $entry;
}
}
# Find suitable prefix (sort descending by two keys: 1. number of
# entries covered by a prefix, 2. length of prefix)
foreach $path (sort {($prefix{$a}->[$ENTRY_NUM] ==
$prefix{$b}->[$ENTRY_NUM]) ?
length($b) <=> length($a) :
$prefix{$b}->[$ENTRY_NUM] <=>
$prefix{$a}->[$ENTRY_NUM]}
keys(%prefix)) {
my ($num, $long) = @{$prefix{$path}};
# Check for additional requirement: number of filenames
# that would be too long may not exceed a certain percentage
if ($long <= $num * $max_long / 100) {
return $path;
}
}
return "";
}
#
# shorten_filename(filename, width)
#
# Truncate filename if it is longer than width characters.
#
sub shorten_filename($$)
{
my ($filename, $width) = @_;
my $l = length($filename);
my $s;
my $e;
return $filename if ($l <= $width);
$e = int(($width - 3) / 2);
$s = $width - 3 - $e;
return substr($filename, 0, $s).'...'.substr($filename, $l - $e);
}
sub shorten_number($$)
{
my ($number, $width) = @_;
my $result = sprintf("%*d", $width, $number);
return $result if (length($result) <= $width);
$number = $number / 1000;
return $result if (length($result) <= $width);
$result = sprintf("%*dk", $width - 1, $number);
return $result if (length($result) <= $width);
$number = $number / 1000;
$result = sprintf("%*dM", $width - 1, $number);
return $result if (length($result) <= $width);
return '#';
}
sub shorten_rate($$$)
{
my ($hit, $found, $width) = @_;
my $result = rate($hit, $found, "%", 1, $width);
return $result if (length($result) <= $width);
$result = rate($hit, $found, "%", 0, $width);
return $result if (length($result) <= $width);
return "#";
}
#
# list()
#
sub list()
{
my $data = read_info_file($list);
my $filename;
my $found;
my $hit;
my $entry;
my $fn_found;
my $fn_hit;
my $br_found;
my $br_hit;
my $total_found = 0;
my $total_hit = 0;
my $fn_total_found = 0;
my $fn_total_hit = 0;
my $br_total_found = 0;
my $br_total_hit = 0;
my $prefix;
my $strlen = length("Filename");
my $format;
my $heading1;
my $heading2;
my @footer;
my $barlen;
my $rate;
my $fnrate;
my $brrate;
my $lastpath;
my $F_LN_NUM = 0;
my $F_LN_RATE = 1;
my $F_FN_NUM = 2;
my $F_FN_RATE = 3;
my $F_BR_NUM = 4;
my $F_BR_RATE = 5;
my @fwidth_narrow = (5, 5, 3, 5, 4, 5);
my @fwidth_wide = (6, 5, 5, 5, 6, 5);
my @fwidth = @fwidth_wide;
my $w;
my $max_width = $opt_list_width;
my $max_long = $opt_list_truncate_max;
my $fwidth_narrow_length;
my $fwidth_wide_length;
my $got_prefix = 0;
my $root_prefix = 0;
# Calculate total width of narrow fields
$fwidth_narrow_length = 0;
foreach $w (@fwidth_narrow) {
$fwidth_narrow_length += $w + 1;
}
# Calculate total width of wide fields
$fwidth_wide_length = 0;
foreach $w (@fwidth_wide) {
$fwidth_wide_length += $w + 1;
}
# Get common file path prefix
$prefix = get_prefix($max_width - $fwidth_narrow_length, $max_long,
keys(%{$data}));
$root_prefix = 1 if ($prefix eq rootdir());
$got_prefix = 1 if (length($prefix) > 0);
$prefix =~ s/\/$//;
# Get longest filename length
foreach $filename (keys(%{$data})) {
if (!$opt_list_full_path) {
if (!$got_prefix || !$root_prefix &&
!($filename =~ s/^\Q$prefix\/\E//)) {
my ($v, $d, $f) = splitpath($filename);
$filename = $f;
}
}
# Determine maximum length of entries
if (length($filename) > $strlen) {
$strlen = length($filename)
}
}
if (!$opt_list_full_path) {
my $blanks;
$w = $fwidth_wide_length;
# Check if all columns fit into max_width characters
if ($strlen + $fwidth_wide_length > $max_width) {
# Use narrow fields
@fwidth = @fwidth_narrow;
$w = $fwidth_narrow_length;
if (($strlen + $fwidth_narrow_length) > $max_width) {
# Truncate filenames at max width
$strlen = $max_width - $fwidth_narrow_length;
}
}
# Add some blanks between filename and fields if possible
$blanks = int($strlen * 0.5);
$blanks = 4 if ($blanks < 4);
$blanks = 8 if ($blanks > 8);
if (($strlen + $w + $blanks) < $max_width) {
$strlen += $blanks;
} else {
$strlen = $max_width - $w;
}
}
# Filename
$w = $strlen;
$format = "%-${w}s|";
$heading1 = sprintf("%*s|", $w, "");
$heading2 = sprintf("%-*s|", $w, "Filename");
$barlen = $w + 1;
# Line coverage rate
$w = $fwidth[$F_LN_RATE];
$format .= "%${w}s ";
$heading1 .= sprintf("%-*s |", $w + $fwidth[$F_LN_NUM],
"Lines");
$heading2 .= sprintf("%-*s ", $w, "Rate");
$barlen += $w + 1;
# Number of lines
$w = $fwidth[$F_LN_NUM];
$format .= "%${w}s|";
$heading2 .= sprintf("%*s|", $w, "Num");
$barlen += $w + 1;
# Function coverage rate
$w = $fwidth[$F_FN_RATE];
$format .= "%${w}s ";
$heading1 .= sprintf("%-*s|", $w + $fwidth[$F_FN_NUM] + 1,
"Functions");
$heading2 .= sprintf("%-*s ", $w, "Rate");
$barlen += $w + 1;
# Number of functions
$w = $fwidth[$F_FN_NUM];
$format .= "%${w}s|";
$heading2 .= sprintf("%*s|", $w, "Num");
$barlen += $w + 1;
# Branch coverage rate
$w = $fwidth[$F_BR_RATE];
$format .= "%${w}s ";
$heading1 .= sprintf("%-*s", $w + $fwidth[$F_BR_NUM] + 1,
"Branches");
$heading2 .= sprintf("%-*s ", $w, "Rate");
$barlen += $w + 1;
# Number of branches
$w = $fwidth[$F_BR_NUM];
$format .= "%${w}s";
$heading2 .= sprintf("%*s", $w, "Num");
$barlen += $w;
# Line end
$format .= "\n";
$heading1 .= "\n";
$heading2 .= "\n";
# Print heading
print($heading1);
print($heading2);
print(("="x$barlen)."\n");
# Print per file information
foreach $filename (sort(keys(%{$data})))
{
my @file_data;
my $print_filename = $filename;
$entry = $data->{$filename};
if (!$opt_list_full_path) {
my $p;
$print_filename = $filename;
if (!$got_prefix || !$root_prefix &&
!($print_filename =~ s/^\Q$prefix\/\E//)) {
my ($v, $d, $f) = splitpath($filename);
$p = catpath($v, $d, "");
$p =~ s/\/$//;
$print_filename = $f;
} else {
$p = $prefix;
}
if (!defined($lastpath) || $lastpath ne $p) {
print("\n") if (defined($lastpath));
$lastpath = $p;
print("[$lastpath/]\n") if (!$root_prefix);
}
$print_filename = shorten_filename($print_filename,
$strlen);
}
(undef, undef, undef, undef, undef, undef, undef, undef,
$found, $hit, $fn_found, $fn_hit, $br_found, $br_hit) =
get_info_entry($entry);
# Assume zero count if there is no function data for this file
if (!defined($fn_found) || !defined($fn_hit)) {
$fn_found = 0;
$fn_hit = 0;
}
# Assume zero count if there is no branch data for this file
if (!defined($br_found) || !defined($br_hit)) {
$br_found = 0;
$br_hit = 0;
}
# Add line coverage totals
$total_found += $found;
$total_hit += $hit;
# Add function coverage totals
$fn_total_found += $fn_found;
$fn_total_hit += $fn_hit;
# Add branch coverage totals
$br_total_found += $br_found;
$br_total_hit += $br_hit;
# Determine line coverage rate for this file
$rate = shorten_rate($hit, $found, $fwidth[$F_LN_RATE]);
# Determine function coverage rate for this file
$fnrate = shorten_rate($fn_hit, $fn_found, $fwidth[$F_FN_RATE]);
# Determine branch coverage rate for this file
$brrate = shorten_rate($br_hit, $br_found, $fwidth[$F_BR_RATE]);
# Assemble line parameters
push(@file_data, $print_filename);
push(@file_data, $rate);
push(@file_data, shorten_number($found, $fwidth[$F_LN_NUM]));
push(@file_data, $fnrate);
push(@file_data, shorten_number($fn_found, $fwidth[$F_FN_NUM]));
push(@file_data, $brrate);
push(@file_data, shorten_number($br_found, $fwidth[$F_BR_NUM]));
# Print assembled line
printf($format, @file_data);
}
# Determine total line coverage rate
$rate = shorten_rate($total_hit, $total_found, $fwidth[$F_LN_RATE]);
# Determine total function coverage rate
$fnrate = shorten_rate($fn_total_hit, $fn_total_found,
$fwidth[$F_FN_RATE]);
# Determine total branch coverage rate
$brrate = shorten_rate($br_total_hit, $br_total_found,
$fwidth[$F_BR_RATE]);
# Print separator
print(("="x$barlen)."\n");
# Assemble line parameters
push(@footer, sprintf("%*s", $strlen, "Total:"));
push(@footer, $rate);
push(@footer, shorten_number($total_found, $fwidth[$F_LN_NUM]));
push(@footer, $fnrate);
push(@footer, shorten_number($fn_total_found, $fwidth[$F_FN_NUM]));
push(@footer, $brrate);
push(@footer, shorten_number($br_total_found, $fwidth[$F_BR_NUM]));
# Print assembled line
printf($format, @footer);
}
#
# get_common_filename(filename1, filename2)
#
# Check for filename components which are common to FILENAME1 and FILENAME2.
# Upon success, return
#
# (common, path1, path2)
#
# or 'undef' in case there are no such parts.
#
sub get_common_filename($$)
{
my @list1 = split("/", $_[0]);
my @list2 = split("/", $_[1]);
my @result;
# Work in reverse order, i.e. beginning with the filename itself
while (@list1 && @list2 && ($list1[$#list1] eq $list2[$#list2]))
{
unshift(@result, pop(@list1));
pop(@list2);
}
# Did we find any similarities?
if (scalar(@result) > 0)
{
return (join("/", @result), join("/", @list1),
join("/", @list2));
}
else
{
return undef;
}
}
#
# strip_directories($path, $depth)
#
# Remove DEPTH leading directory levels from PATH.
#
sub strip_directories($$)
{
my $filename = $_[0];
my $depth = $_[1];
my $i;
if (!defined($depth) || ($depth < 1))
{
return $filename;
}
for ($i = 0; $i < $depth; $i++)
{
$filename =~ s/^[^\/]*\/+(.*)$/$1/;
}
return $filename;
}
#
# read_diff(filename)
#
# Read diff output from FILENAME to memory. The diff file has to follow the
# format generated by 'diff -u'. Returns a list of hash references:
#
# (mapping, path mapping)
#
# mapping: filename -> reference to line hash
# line hash: line number in new file -> corresponding line number in old file
#
# path mapping: filename -> old filename
#
# Die in case of error.
#
sub read_diff($)
{
my $diff_file = $_[0]; # Name of diff file
my %diff; # Resulting mapping filename -> line hash
my %paths; # Resulting mapping old path -> new path
my $mapping; # Reference to current line hash
my $line; # Contents of current line
my $num_old; # Current line number in old file
my $num_new; # Current line number in new file
my $file_old; # Name of old file in diff section
my $file_new; # Name of new file in diff section
my $filename; # Name of common filename of diff section
my $in_block = 0; # Non-zero while we are inside a diff block
local *HANDLE; # File handle for reading the diff file
info("Reading diff $diff_file\n");
# Check if file exists and is readable
stat($diff_file);
if (!(-r _))
{
die("ERROR: cannot read file $diff_file!\n");
}
# Check if this is really a plain file
if (!(-f _))
{
die("ERROR: not a plain file: $diff_file!\n");
}
# Check for .gz extension
if ($diff_file =~ /\.gz$/)
{
# Check for availability of GZIP tool
system_no_output(1, "gunzip", "-h")
and die("ERROR: gunzip command not available!\n");
# Check integrity of compressed file
system_no_output(1, "gunzip", "-t", $diff_file)
and die("ERROR: integrity check failed for ".
"compressed file $diff_file!\n");
# Open compressed file
open(HANDLE, "-|", "gunzip -c '$diff_file'")
or die("ERROR: cannot start gunzip to decompress ".
"file $_[0]!\n");
}
else
{
# Open decompressed file
open(HANDLE, "<", $diff_file)
or die("ERROR: cannot read file $_[0]!\n");
}
# Parse diff file line by line
while (<HANDLE>)
{
chomp($_);
$line = $_;
foreach ($line)
{
# Filename of old file:
# --- <filename> <date>
/^--- (\S+)/ && do
{
$file_old = strip_directories($1, $strip);
last;
};
# Filename of new file:
# +++ <filename> <date>
/^\+\+\+ (\S+)/ && do
{
# Add last file to resulting hash
if ($filename)
{
my %new_hash;
$diff{$filename} = $mapping;
$mapping = \%new_hash;
}
$file_new = strip_directories($1, $strip);
$filename = $file_old;
$paths{$filename} = $file_new;
$num_old = 1;
$num_new = 1;
last;
};
# Start of diff block:
# @@ -old_start,old_num, +new_start,new_num @@
/^\@\@\s+-(\d+),(\d+)\s+\+(\d+),(\d+)\s+\@\@$/ && do
{
$in_block = 1;
while ($num_old < $1)
{
$mapping->{$num_new} = $num_old;
$num_old++;
$num_new++;
}
last;
};
# Unchanged line
# <line starts with blank>
/^ / && do
{
if ($in_block == 0)
{
last;
}
$mapping->{$num_new} = $num_old;
$num_old++;
$num_new++;
last;
};
# Line as seen in old file
# <line starts with '-'>
/^-/ && do
{
if ($in_block == 0)
{
last;
}
$num_old++;
last;
};
# Line as seen in new file
# <line starts with '+'>
/^\+/ && do
{
if ($in_block == 0)
{
last;
}
$num_new++;
last;
};
# Empty line
/^$/ && do
{
if ($in_block == 0)
{
last;
}
$mapping->{$num_new} = $num_old;
$num_old++;
$num_new++;
last;
};
}
}
close(HANDLE);
# Add final diff file section to resulting hash
if ($filename)
{
$diff{$filename} = $mapping;
}
if (!%diff)
{
die("ERROR: no valid diff data found in $diff_file!\n".
"Make sure to use 'diff -u' when generating the diff ".
"file.\n");
}
return (\%diff, \%paths);
}
#
# apply_diff($count_data, $line_hash)
#
# Transform count data using a mapping of lines:
#
# $count_data: reference to hash: line number -> data
# $line_hash: reference to hash: line number new -> line number old
#
# Return a reference to transformed count data.
#
sub apply_diff($$)
{
my $count_data = $_[0]; # Reference to data hash: line -> hash
my $line_hash = $_[1]; # Reference to line hash: new line -> old line
my %result; # Resulting hash
my $last_new = 0; # Last new line number found in line hash
my $last_old = 0; # Last old line number found in line hash
# Iterate all new line numbers found in the diff
foreach (sort({$a <=> $b} keys(%{$line_hash})))
{
$last_new = $_;
$last_old = $line_hash->{$last_new};
# Is there data associated with the corresponding old line?
if (defined($count_data->{$line_hash->{$_}}))
{
# Copy data to new hash with a new line number
$result{$_} = $count_data->{$line_hash->{$_}};
}
}
# Transform all other lines which come after the last diff entry
foreach (sort({$a <=> $b} keys(%{$count_data})))
{
if ($_ <= $last_old)
{
# Skip lines which were covered by line hash
next;
}
# Copy data to new hash with an offset
$result{$_ + ($last_new - $last_old)} = $count_data->{$_};
}
return \%result;
}
#
# apply_diff_to_brcount(brcount, linedata)
#
# Adjust line numbers of branch coverage data according to linedata.
#
sub apply_diff_to_brcount($$)
{
my ($brcount, $linedata) = @_;
my $db;
# Convert brcount to db format
$db = brcount_to_db($brcount);
# Apply diff to db format
$db = apply_diff($db, $linedata);
# Convert db format back to brcount format
($brcount) = db_to_brcount($db);
return $brcount;
}
#
# get_hash_max(hash_ref)
#
# Return the highest integer key from hash.
#
sub get_hash_max($)
{
my ($hash) = @_;
my $max;
foreach (keys(%{$hash})) {
if (!defined($max)) {
$max = $_;
} elsif ($hash->{$_} > $max) {
$max = $_;
}
}
return $max;
}
sub get_hash_reverse($)
{
my ($hash) = @_;
my %result;
foreach (keys(%{$hash})) {
$result{$hash->{$_}} = $_;
}
return \%result;
}
#
# apply_diff_to_funcdata(funcdata, line_hash)
#
sub apply_diff_to_funcdata($$)
{
my ($funcdata, $linedata) = @_;
my $last_new = get_hash_max($linedata);
my $last_old = $linedata->{$last_new};
my $func;
my %result;
my $line_diff = get_hash_reverse($linedata);
foreach $func (keys(%{$funcdata})) {
my $line = $funcdata->{$func};
if (defined($line_diff->{$line})) {
$result{$func} = $line_diff->{$line};
} elsif ($line > $last_old) {
$result{$func} = $line + $last_new - $last_old;
}
}
return \%result;
}
#
# get_line_hash($filename, $diff_data, $path_data)
#
# Find line hash in DIFF_DATA which matches FILENAME. On success, return list
# line hash. or undef in case of no match. Die if more than one line hashes in
# DIFF_DATA match.
#
sub get_line_hash($$$)
{
my $filename = $_[0];
my $diff_data = $_[1];
my $path_data = $_[2];
my $conversion;
my $old_path;
my $new_path;
my $diff_name;
my $common;
my $old_depth;
my $new_depth;
# Remove trailing slash from diff path
$diff_path =~ s/\/$//;
foreach (keys(%{$diff_data}))
{
my $sep = "";
$sep = '/' if (!/^\//);
# Try to match diff filename with filename
if ($filename =~ /^\Q$diff_path$sep$_\E$/)
{
if ($diff_name)
{
# Two files match, choose the more specific one
# (the one with more path components)
$old_depth = ($diff_name =~ tr/\///);
$new_depth = (tr/\///);
if ($old_depth == $new_depth)
{
die("ERROR: diff file contains ".
"ambiguous entries for ".
"$filename\n");
}
elsif ($new_depth > $old_depth)
{
$diff_name = $_;
}
}
else
{
$diff_name = $_;
}
};
}
if ($diff_name)
{
# Get converted path
if ($filename =~ /^(.*)$diff_name$/)
{
($common, $old_path, $new_path) =
get_common_filename($filename,
$1.$path_data->{$diff_name});
}
return ($diff_data->{$diff_name}, $old_path, $new_path);
}
else
{
return undef;
}
}
#
# convert_paths(trace_data, path_conversion_data)
#
# Rename all paths in TRACE_DATA which show up in PATH_CONVERSION_DATA.
#
sub convert_paths($$)
{
my $trace_data = $_[0];
my $path_conversion_data = $_[1];
my $filename;
my $new_path;
if (scalar(keys(%{$path_conversion_data})) == 0)
{
info("No path conversion data available.\n");
return;
}
# Expand path conversion list
foreach $filename (keys(%{$path_conversion_data}))
{
$new_path = $path_conversion_data->{$filename};
while (($filename =~ s/^(.*)\/[^\/]+$/$1/) &&
($new_path =~ s/^(.*)\/[^\/]+$/$1/) &&
($filename ne $new_path))
{
$path_conversion_data->{$filename} = $new_path;
}
}
# Adjust paths
FILENAME: foreach $filename (keys(%{$trace_data}))
{
# Find a path in our conversion table that matches, starting
# with the longest path
foreach (sort({length($b) <=> length($a)}
keys(%{$path_conversion_data})))
{
# Is this path a prefix of our filename?
if (!($filename =~ /^$_(.*)$/))
{
next;
}
$new_path = $path_conversion_data->{$_}.$1;
# Make sure not to overwrite an existing entry under
# that path name
if ($trace_data->{$new_path})
{
# Need to combine entries
$trace_data->{$new_path} =
combine_info_entries(
$trace_data->{$filename},
$trace_data->{$new_path},
$filename);
}
else
{
# Simply rename entry
$trace_data->{$new_path} =
$trace_data->{$filename};
}
delete($trace_data->{$filename});
next FILENAME;
}
info("No conversion available for filename $filename\n");
}
}
#
# sub adjust_fncdata(funcdata, testfncdata, sumfnccount)
#
# Remove function call count data from testfncdata and sumfnccount which
# is no longer present in funcdata.
#
sub adjust_fncdata($$$)
{
my ($funcdata, $testfncdata, $sumfnccount) = @_;
my $testname;
my $func;
my $f_found;
my $f_hit;
# Remove count data in testfncdata for functions which are no longer
# in funcdata
foreach $testname (keys(%{$testfncdata})) {
my $fnccount = $testfncdata->{$testname};
foreach $func (keys(%{$fnccount})) {
if (!defined($funcdata->{$func})) {
delete($fnccount->{$func});
}
}
}
# Remove count data in sumfnccount for functions which are no longer
# in funcdata
foreach $func (keys(%{$sumfnccount})) {
if (!defined($funcdata->{$func})) {
delete($sumfnccount->{$func});
}
}
}
#
# get_func_found_and_hit(sumfnccount)
#
# Return (f_found, f_hit) for sumfnccount
#
sub get_func_found_and_hit($)
{
my ($sumfnccount) = @_;
my $function;
my $f_found;
my $f_hit;
$f_found = scalar(keys(%{$sumfnccount}));
$f_hit = 0;
foreach $function (keys(%{$sumfnccount})) {
if ($sumfnccount->{$function} > 0) {
$f_hit++;
}
}
return ($f_found, $f_hit);
}
#
# diff()
#
sub diff()
{
my $trace_data = read_info_file($diff);
my $diff_data;
my $path_data;
my $old_path;
my $new_path;
my %path_conversion_data;
my $filename;
my $line_hash;
my $new_name;
my $entry;
my $testdata;
my $testname;
my $sumcount;
my $funcdata;
my $checkdata;
my $testfncdata;
my $sumfnccount;
my $testbrdata;
my $sumbrcount;
my $found;
my $hit;
my $f_found;
my $f_hit;
my $br_found;
my $br_hit;
my $converted = 0;
my $unchanged = 0;
my @result;
local *INFO_HANDLE;
($diff_data, $path_data) = read_diff($ARGV[0]);
foreach $filename (sort(keys(%{$trace_data})))
{
# Find a diff section corresponding to this file
($line_hash, $old_path, $new_path) =
get_line_hash($filename, $diff_data, $path_data);
if (!$line_hash)
{
# There's no diff section for this file
$unchanged++;
next;
}
$converted++;
if ($old_path && $new_path && ($old_path ne $new_path))
{
$path_conversion_data{$old_path} = $new_path;
}
# Check for deleted files
if (scalar(keys(%{$line_hash})) == 0)
{
info("Removing $filename\n");
delete($trace_data->{$filename});
next;
}
info("Converting $filename\n");
$entry = $trace_data->{$filename};
($testdata, $sumcount, $funcdata, $checkdata, $testfncdata,
$sumfnccount, $testbrdata, $sumbrcount) =
get_info_entry($entry);
# Convert test data
foreach $testname (keys(%{$testdata}))
{
# Adjust line numbers of line coverage data
$testdata->{$testname} =
apply_diff($testdata->{$testname}, $line_hash);
# Adjust line numbers of branch coverage data
$testbrdata->{$testname} =
apply_diff_to_brcount($testbrdata->{$testname},
$line_hash);
# Remove empty sets of test data
if (scalar(keys(%{$testdata->{$testname}})) == 0)
{
delete($testdata->{$testname});
delete($testfncdata->{$testname});
delete($testbrdata->{$testname});
}
}
# Rename test data to indicate conversion
foreach $testname (keys(%{$testdata}))
{
# Skip testnames which already contain an extension
if ($testname =~ /,[^,]+$/)
{
next;
}
# Check for name conflict
if (defined($testdata->{$testname.",diff"}))
{
# Add counts
($testdata->{$testname}) = add_counts(
$testdata->{$testname},
$testdata->{$testname.",diff"});
delete($testdata->{$testname.",diff"});
# Add function call counts
($testfncdata->{$testname}) = add_fnccount(
$testfncdata->{$testname},
$testfncdata->{$testname.",diff"});
delete($testfncdata->{$testname.",diff"});
# Add branch counts
combine_brcount(
$testbrdata->{$testname},
$testbrdata->{$testname.",diff"},
$BR_ADD, 1);
delete($testbrdata->{$testname.",diff"});
}
# Move test data to new testname
$testdata->{$testname.",diff"} = $testdata->{$testname};
delete($testdata->{$testname});
# Move function call count data to new testname
$testfncdata->{$testname.",diff"} =
$testfncdata->{$testname};
delete($testfncdata->{$testname});
# Move branch count data to new testname
$testbrdata->{$testname.",diff"} =
$testbrdata->{$testname};
delete($testbrdata->{$testname});
}
# Convert summary of test data
$sumcount = apply_diff($sumcount, $line_hash);
# Convert function data
$funcdata = apply_diff_to_funcdata($funcdata, $line_hash);
# Convert branch coverage data
$sumbrcount = apply_diff_to_brcount($sumbrcount, $line_hash);
# Update found/hit numbers
# Convert checksum data
$checkdata = apply_diff($checkdata, $line_hash);
# Convert function call count data
adjust_fncdata($funcdata, $testfncdata, $sumfnccount);
($f_found, $f_hit) = get_func_found_and_hit($sumfnccount);
($br_found, $br_hit) = get_br_found_and_hit($sumbrcount);
# Update found/hit numbers
$found = 0;
$hit = 0;
foreach (keys(%{$sumcount}))
{
$found++;
if ($sumcount->{$_} > 0)
{
$hit++;
}
}
if ($found > 0)
{
# Store converted entry
set_info_entry($entry, $testdata, $sumcount, $funcdata,
$checkdata, $testfncdata, $sumfnccount,
$testbrdata, $sumbrcount, $found, $hit,
$f_found, $f_hit, $br_found, $br_hit);
}
else
{
# Remove empty data set
delete($trace_data->{$filename});
}
}
# Convert filenames as well if requested
if ($convert_filenames)
{
convert_paths($trace_data, \%path_conversion_data);
}
info("$converted entr".($converted != 1 ? "ies" : "y")." converted, ".
"$unchanged entr".($unchanged != 1 ? "ies" : "y")." left ".
"unchanged.\n");
# Write data
if (!$data_stdout)
{
info("Writing data to $output_filename\n");
open(INFO_HANDLE, ">", $output_filename)
or die("ERROR: cannot write to $output_filename!\n");
@result = write_info_file(*INFO_HANDLE, $trace_data);
close(*INFO_HANDLE);
}
else
{
@result = write_info_file(*STDOUT, $trace_data);
}
return @result;
}
#
# summary()
#
sub summary()
{
my $filename;
my $current;
my $total;
my $ln_total_found;
my $ln_total_hit;
my $fn_total_found;
my $fn_total_hit;
my $br_total_found;
my $br_total_hit;
# Read and combine trace files
foreach $filename (@opt_summary) {
$current = read_info_file($filename);
if (!defined($total)) {
$total = $current;
} else {
$total = combine_info_files($total, $current);
}
}
# Calculate coverage data
foreach $filename (keys(%{$total}))
{
my $entry = $total->{$filename};
my $ln_found;
my $ln_hit;
my $fn_found;
my $fn_hit;
my $br_found;
my $br_hit;
(undef, undef, undef, undef, undef, undef, undef, undef,
$ln_found, $ln_hit, $fn_found, $fn_hit, $br_found,
$br_hit) = get_info_entry($entry);
# Add to totals
$ln_total_found += $ln_found;
$ln_total_hit += $ln_hit;
$fn_total_found += $fn_found;
$fn_total_hit += $fn_hit;
$br_total_found += $br_found;
$br_total_hit += $br_hit;
}
return ($ln_total_found, $ln_total_hit, $fn_total_found, $fn_total_hit,
$br_total_found, $br_total_hit);
}
#
# system_no_output(mode, parameters)
#
# Call an external program using PARAMETERS while suppressing depending on
# the value of MODE:
#
# MODE & 1: suppress STDOUT
# MODE & 2: suppress STDERR
#
# Return 0 on success, non-zero otherwise.
#
sub system_no_output($@)
{
my $mode = shift;
my $result;
local *OLD_STDERR;
local *OLD_STDOUT;
# Save old stdout and stderr handles
($mode & 1) && open(OLD_STDOUT, ">>&", "STDOUT");
($mode & 2) && open(OLD_STDERR, ">>&", "STDERR");
# Redirect to /dev/null
($mode & 1) && open(STDOUT, ">", "/dev/null");
($mode & 2) && open(STDERR, ">", "/dev/null");
system(@_);
$result = $?;
# Close redirected handles
($mode & 1) && close(STDOUT);
($mode & 2) && close(STDERR);
# Restore old handles
($mode & 1) && open(STDOUT, ">>&", "OLD_STDOUT");
($mode & 2) && open(STDERR, ">>&", "OLD_STDERR");
return $result;
}
#
# read_config(filename)
#
# Read configuration file FILENAME and return a reference to a hash containing
# all valid key=value pairs found.
#
sub read_config($)
{
my $filename = $_[0];
my %result;
my $key;
my $value;
local *HANDLE;
if (!open(HANDLE, "<", $filename))
{
warn("WARNING: cannot read configuration file $filename\n");
return undef;
}
while (<HANDLE>)
{
chomp;
# Skip comments
s/#.*//;
# Remove leading blanks
s/^\s+//;
# Remove trailing blanks
s/\s+$//;
next unless length;
($key, $value) = split(/\s*=\s*/, $_, 2);
if (defined($key) && defined($value))
{
$result{$key} = $value;
}
else
{
warn("WARNING: malformed statement in line $. ".
"of configuration file $filename\n");
}
}
close(HANDLE);
return \%result;
}
#
# apply_config(REF)
#
# REF is a reference to a hash containing the following mapping:
#
# key_string => var_ref
#
# where KEY_STRING is a keyword and VAR_REF is a reference to an associated
# variable. If the global configuration hashes CONFIG or OPT_RC contain a value
# for keyword KEY_STRING, VAR_REF will be assigned the value for that keyword.
#
sub apply_config($)
{
my $ref = $_[0];
foreach (keys(%{$ref}))
{
if (defined($opt_rc{$_})) {
${$ref->{$_}} = $opt_rc{$_};
} elsif (defined($config->{$_})) {
${$ref->{$_}} = $config->{$_};
}
}
}
sub warn_handler($)
{
my ($msg) = @_;
warn("$tool_name: $msg");
}
sub die_handler($)
{
my ($msg) = @_;
temp_cleanup();
die("$tool_name: $msg");
}
sub abort_handler($)
{
temp_cleanup();
exit(1);
}
sub temp_cleanup()
{
# Ensure temp directory is not in use by current process
chdir("/");
if (@temp_dirs) {
info("Removing temporary directories.\n");
foreach (@temp_dirs) {
rmtree($_);
}
@temp_dirs = ();
}
}
sub setup_gkv_sys()
{
system_no_output(3, "mount", "-t", "debugfs", "nodev",
"/sys/kernel/debug");
}
sub setup_gkv_proc()
{
if (system_no_output(3, "modprobe", "gcov_proc")) {
system_no_output(3, "modprobe", "gcov_prof");
}
}
sub check_gkv_sys($)
{
my ($dir) = @_;
if (-e "$dir/reset") {
return 1;
}
return 0;
}
sub check_gkv_proc($)
{
my ($dir) = @_;
if (-e "$dir/vmlinux") {
return 1;
}
return 0;
}
sub setup_gkv()
{
my $dir;
my $sys_dir = "/sys/kernel/debug/gcov";
my $proc_dir = "/proc/gcov";
my @todo;
if (!defined($gcov_dir)) {
info("Auto-detecting gcov kernel support.\n");
@todo = ( "cs", "cp", "ss", "cs", "sp", "cp" );
} elsif ($gcov_dir =~ /proc/) {
info("Checking gcov kernel support at $gcov_dir ".
"(user-specified).\n");
@todo = ( "cp", "sp", "cp", "cs", "ss", "cs");
} else {
info("Checking gcov kernel support at $gcov_dir ".
"(user-specified).\n");
@todo = ( "cs", "ss", "cs", "cp", "sp", "cp", );
}
foreach (@todo) {
if ($_ eq "cs") {
# Check /sys
$dir = defined($gcov_dir) ? $gcov_dir : $sys_dir;
if (check_gkv_sys($dir)) {
info("Found ".$GKV_NAME[$GKV_SYS]." gcov ".
"kernel support at $dir\n");
return ($GKV_SYS, $dir);
}
} elsif ($_ eq "cp") {
# Check /proc
$dir = defined($gcov_dir) ? $gcov_dir : $proc_dir;
if (check_gkv_proc($dir)) {
info("Found ".$GKV_NAME[$GKV_PROC]." gcov ".
"kernel support at $dir\n");
return ($GKV_PROC, $dir);
}
} elsif ($_ eq "ss") {
# Setup /sys
setup_gkv_sys();
} elsif ($_ eq "sp") {
# Setup /proc
setup_gkv_proc();
}
}
if (defined($gcov_dir)) {
die("ERROR: could not find gcov kernel data at $gcov_dir\n");
} else {
die("ERROR: no gcov kernel data found\n");
}
}
#
# get_overall_line(found, hit, name_singular, name_plural)
#
# Return a string containing overall information for the specified
# found/hit data.
#
sub get_overall_line($$$$)
{
my ($found, $hit, $name_sn, $name_pl) = @_;
my $name;
return "no data found" if (!defined($found) || $found == 0);
$name = ($found == 1) ? $name_sn : $name_pl;
return rate($hit, $found, "% ($hit of $found $name)");
}
#
# print_overall_rate(ln_do, ln_found, ln_hit, fn_do, fn_found, fn_hit, br_do
# br_found, br_hit)
#
# Print overall coverage rates for the specified coverage types.
#
sub print_overall_rate($$$$$$$$$)
{
my ($ln_do, $ln_found, $ln_hit, $fn_do, $fn_found, $fn_hit,
$br_do, $br_found, $br_hit) = @_;
info("Summary coverage rate:\n");
info(" lines......: %s\n",
get_overall_line($ln_found, $ln_hit, "line", "lines"))
if ($ln_do);
info(" functions..: %s\n",
get_overall_line($fn_found, $fn_hit, "function", "functions"))
if ($fn_do);
info(" branches...: %s\n",
get_overall_line($br_found, $br_hit, "branch", "branches"))
if ($br_do);
}
#
# rate(hit, found[, suffix, precision, width])
#
# Return the coverage rate [0..100] for HIT and FOUND values. 0 is only
# returned when HIT is 0. 100 is only returned when HIT equals FOUND.
# PRECISION specifies the precision of the result. SUFFIX defines a
# string that is appended to the result if FOUND is non-zero. Spaces
# are added to the start of the resulting string until it is at least WIDTH
# characters wide.
#
sub rate($$;$$$)
{
my ($hit, $found, $suffix, $precision, $width) = @_;
my $rate;
# Assign defaults if necessary
$precision = 1 if (!defined($precision));
$suffix = "" if (!defined($suffix));
$width = 0 if (!defined($width));
return sprintf("%*s", $width, "-") if (!defined($found) || $found == 0);
$rate = sprintf("%.*f", $precision, $hit * 100 / $found);
# Adjust rates if necessary
if ($rate == 0 && $hit > 0) {
$rate = sprintf("%.*f", $precision, 1 / 10 ** $precision);
} elsif ($rate == 100 && $hit != $found) {
$rate = sprintf("%.*f", $precision, 100 - 1 / 10 ** $precision);
}
return sprintf("%*s", $width, $rate.$suffix);
}
#!/usr/bin/env perl
use strict;
use warnings;
use File::Basename;
sub update_man_page($);
sub update_bin_tool($);
sub update_txt_file($);
sub update_spec_file($);
sub write_version_file($);
sub get_file_info($);
our $directory = $ARGV[0];
our $version = $ARGV[1];
our $release = $ARGV[2];
our $full = $ARGV[3];
our @man_pages = ("man/gendesc.1", "man/genhtml.1", "man/geninfo.1",
"man/genpng.1", "man/lcov.1", "man/lcovrc.5");
our @bin_tools = ("bin/gendesc", "bin/genhtml", "bin/geninfo",
"bin/genpng", "bin/lcov");
our @txt_files = ("README");
our @spec_files = ("rpm/lcov.spec");
if (!defined($directory) || !defined($version) || !defined($release)) {
die("Usage: $0 DIRECTORY|FILE VERSION RELEASE FULL_VERSION\n");
}
# Determine mode of operation
if (-f $directory) {
my $file = $directory;
my $base = basename($file);
if (grep(/^$base$/, map({ basename($_) } @man_pages))) {
print("Updating man page $file\n");
update_man_page($file);
} elsif (grep(/^$base$/, map({ basename($_) } @bin_tools))) {
print("Updating bin tool $file\n");
update_bin_tool($file);
} elsif (grep(/^$base$/, map({ basename($_) } @txt_files))) {
print("Updating text file $file\n");
update_txt_file($file);
} elsif (grep(/^$base$/, map({ basename($_) } @spec_files))) {
print("Updating spec file $file\n");
update_spec_file($file);
} elsif ($base eq ".version") {
print("Updating version file $file\n");
write_version_file($file);
} else {
print("WARNING: Skipping unknown file $file\n");
}
print("Done.\n");
exit(0);
}
foreach (@man_pages) {
print("Updating man page $_\n");
update_man_page($directory."/".$_);
}
foreach (@bin_tools) {
print("Updating bin tool $_\n");
update_bin_tool($directory."/".$_);
}
foreach (@txt_files) {
print("Updating text file $_\n");
update_txt_file($directory."/".$_);
}
foreach (@spec_files) {
print("Updating spec file $_\n");
update_spec_file($directory."/".$_);
}
print("Updating version file $directory/.version\n");
write_version_file("$directory/.version");
print("Done.\n");
sub get_file_info($)
{
my ($filename) = @_;
my ($sec, $min, $hour, $year, $month, $day);
my @stat;
my $gittime;
return (0, 0, 0) if (!-e $filename);
@stat = stat($filename);
($sec, $min, $hour, $day, $month, $year) = gmtime($stat[9]);
$year += 1900;
$month += 1;
return (sprintf("%04d-%02d-%02d", $year, $month, $day),
sprintf("%04d%02d%02d%02d%02d.%02d", $year, $month, $day,
$hour, $min, $sec),
sprintf("%o", $stat[2] & 07777));
}
sub update_man_page($)
{
my ($filename) = @_;
my @date = get_file_info($filename);
my $date_string = $date[0];
local *IN;
local *OUT;
$date_string =~ s/-/\\-/g;
open(IN, "<$filename") || die ("Error: cannot open $filename\n");
open(OUT, ">$filename.new") ||
die("Error: cannot create $filename.new\n");
while (<IN>) {
s/\"LCOV\s+\d+\.\d+\"/\"LCOV $version\"/g;
s/\d\d\d\d\\\-\d\d\\\-\d\d/$date_string/g;
print(OUT $_);
}
close(OUT);
close(IN);
chmod(oct($date[2]), "$filename.new");
system("mv", "-f", "$filename.new", "$filename");
system("touch", "$filename", "-t", $date[1]);
}
sub update_bin_tool($)
{
my ($filename) = @_;
my @date = get_file_info($filename);
local *IN;
local *OUT;
open(IN, "<$filename") || die ("Error: cannot open $filename\n");
open(OUT, ">$filename.new") ||
die("Error: cannot create $filename.new\n");
while (<IN>) {
s/^(our\s+\$lcov_version\s*=).*$/$1 "LCOV version $full";/g;
print(OUT $_);
}
close(OUT);
close(IN);
chmod(oct($date[2]), "$filename.new");
system("mv", "-f", "$filename.new", "$filename");
system("touch", "$filename", "-t", $date[1]);
}
sub update_txt_file($)
{
my ($filename) = @_;
my @date = get_file_info($filename);
local *IN;
local *OUT;
open(IN, "<$filename") || die ("Error: cannot open $filename\n");
open(OUT, ">$filename.new") ||
die("Error: cannot create $filename.new\n");
while (<IN>) {
s/(Last\s+changes:\s+)\d\d\d\d-\d\d-\d\d/$1$date[0]/g;
print(OUT $_);
}
close(OUT);
close(IN);
chmod(oct($date[2]), "$filename.new");
system("mv", "-f", "$filename.new", "$filename");
system("touch", "$filename", "-t", $date[1]);
}
sub update_spec_file($)
{
my ($filename) = @_;
my @date = get_file_info($filename);
local *IN;
local *OUT;
open(IN, "<$filename") || die ("Error: cannot open $filename\n");
open(OUT, ">$filename.new") ||
die("Error: cannot create $filename.new\n");
while (<IN>) {
s/^(Version:\s*)\d+\.\d+.*$/$1$version/;
s/^(Release:\s*).*$/$1$release/;
print(OUT $_);
}
close(OUT);
close(IN);
system("mv", "-f", "$filename.new", "$filename");
system("touch", "$filename", "-t", $date[1]);
}
sub write_version_file($)
{
my ($filename) = @_;
my $fd;
open($fd, ">", $filename) or die("Error: cannot write $filename: $!\n");
print($fd "VERSION=$version\n");
print($fd "RELEASE=$release\n");
print($fd "FULL=$full\n");
close($fd);
}
#
# Makefile for the LCOV example program.
#
# Make targets:
# - example: compile the example program
# - output: run test cases on example program and create HTML output
# - clean: clean up directory
#
CC := gcc
CFLAGS := -Wall -I. -fprofile-arcs -ftest-coverage
LCOV := ../bin/lcov
GENHTML := ../bin/genhtml
GENDESC := ../bin/gendesc
GENPNG := ../bin/genpng
# Depending on the presence of the GD.pm perl module, we can use the
# special option '--frames' for genhtml
USE_GENPNG := $(shell $(GENPNG) --help >/dev/null 2>/dev/null; echo $$?)
ifeq ($(USE_GENPNG),0)
FRAMES := --frames
else
FRAMES :=
endif
.PHONY: clean output test_noargs test_2_to_2000 test_overflow
all: output
example: example.o iterate.o gauss.o
$(CC) example.o iterate.o gauss.o -o example -lgcov
example.o: example.c iterate.h gauss.h
$(CC) $(CFLAGS) -c example.c -o example.o
iterate.o: methods/iterate.c iterate.h
$(CC) $(CFLAGS) -c methods/iterate.c -o iterate.o
gauss.o: methods/gauss.c gauss.h
$(CC) $(CFLAGS) -c methods/gauss.c -o gauss.o
output: example descriptions test_noargs test_2_to_2000 test_overflow
@echo
@echo '*'
@echo '* Generating HTML output'
@echo '*'
@echo
$(GENHTML) trace_noargs.info trace_args.info trace_overflow.info \
--output-directory output --title "Basic example" \
--show-details --description-file descriptions $(FRAMES) \
--legend
@echo
@echo '*'
@echo '* See '`pwd`/output/index.html
@echo '*'
@echo
descriptions: descriptions.txt
$(GENDESC) descriptions.txt -o descriptions
all_tests: example test_noargs test_2_to_2000 test_overflow
test_noargs:
@echo
@echo '*'
@echo '* Test case 1: running ./example without parameters'
@echo '*'
@echo
$(LCOV) --zerocounters --directory .
./example
$(LCOV) --capture --directory . --output-file trace_noargs.info --test-name test_noargs --no-external
test_2_to_2000:
@echo
@echo '*'
@echo '* Test case 2: running ./example 2 2000'
@echo '*'
@echo
$(LCOV) --zerocounters --directory .
./example 2 2000
$(LCOV) --capture --directory . --output-file trace_args.info --test-name test_2_to_2000 --no-external
test_overflow:
@echo
@echo '*'
@echo '* Test case 3: running ./example 0 100000 (causes an overflow)'
@echo '*'
@echo
$(LCOV) --zerocounters --directory .
./example 0 100000 || true
$(LCOV) --capture --directory . --output-file trace_overflow.info --test-name "test_overflow" --no-external
clean:
rm -rf *.o *.bb *.bbg *.da *.gcno *.gcda *.info output example \
descriptions
To get an example of how the LCOV generated HTML output looks like,
type 'make output' and point a web browser to the resulting file
output/index.html
test_noargs
Example program is called without arguments so that default range
[0..9] is used.
test_2_to_2000
Example program is called with "2" and "2000" as arguments.
test_overflow
Example program is called with "0" and "100000" as arguments. The
resulting sum is too large to be stored as an int variable.
/*
* example.c
*
* Calculate the sum of a given range of integer numbers. The range is
* specified by providing two integer numbers as command line argument.
* If no arguments are specified, assume the predefined range [0..9].
* Abort with an error message if the resulting number is too big to be
* stored as int variable.
*
* This program example is similar to the one found in the GCOV documentation.
* It is used to demonstrate the HTML output generated by LCOV.
*
* The program is split into 3 modules to better demonstrate the 'directory
* overview' function. There are also a lot of bloated comments inserted to
* artificially increase the source code size so that the 'source code
* overview' function makes at least a minimum of sense.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include "iterate.h"
#include "gauss.h"
static int start = 0;
static int end = 9;
int main (int argc, char* argv[])
{
int total1, total2;
/* Accept a pair of numbers as command line arguments. */
if (argc == 3)
{
start = atoi(argv[1]);
end = atoi(argv[2]);
}
/* Use both methods to calculate the result. */
total1 = iterate_get_sum (start, end);
total2 = gauss_get_sum (start, end);
/* Make sure both results are the same. */
if (total1 != total2)
{
printf ("Failure (%d != %d)!\n", total1, total2);
}
else
{
printf ("Success, sum[%d..%d] = %d\n", start, end, total1);
}
return 0;
}
#ifndef GAUSS_H
#define GAUSS_H GAUSS_h
extern int gauss_get_sum (int min, int max);
#endif /* GAUSS_H */
#ifndef ITERATE_H
#define ITERATE_H ITERATE_H
extern int iterate_get_sum (int min, int max);
#endif /* ITERATE_H */
/*
* methods/gauss.c
*
* Calculate the sum of a given range of integer numbers.
*
* Somewhat of a more subtle way of calculation - and it even has a story
* behind it:
*
* Supposedly during math classes in elementary school, the teacher of
* young mathematician Gauss gave the class an assignment to calculate the
* sum of all natural numbers between 1 and 100, hoping that this task would
* keep the kids occupied for some time. The story goes that Gauss had the
* result ready after only a few minutes. What he had written on his black
* board was something like this:
*
* 1 + 100 = 101
* 2 + 99 = 101
* 3 + 98 = 101
* .
* .
* 100 + 1 = 101
*
* s = (1/2) * 100 * 101 = 5050
*
* A more general form of this formula would be
*
* s = (1/2) * (max + min) * (max - min + 1)
*
* which is used in the piece of code below to implement the requested
* function in constant time, i.e. without dependencies on the size of the
* input parameters.
*
*/
#include "gauss.h"
int gauss_get_sum (int min, int max)
{
/* This algorithm doesn't work well with invalid range specifications
so we're intercepting them here. */
if (max < min)
{
return 0;
}
return (int) ((max + min) * (double) (max - min + 1) / 2);
}
/*
* methods/iterate.c
*
* Calculate the sum of a given range of integer numbers.
*
* This particular method of implementation works by way of brute force,
* i.e. it iterates over the entire range while adding the numbers to finally
* get the total sum. As a positive side effect, we're able to easily detect
* overflows, i.e. situations in which the sum would exceed the capacity
* of an integer variable.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include "iterate.h"
int iterate_get_sum (int min, int max)
{
int i, total;
total = 0;
/* This is where we loop over each number in the range, including
both the minimum and the maximum number. */
for (i = min; i <= max; i++)
{
/* We can detect an overflow by checking whether the new
sum would become negative. */
if (total + i < total)
{
printf ("Error: sum too large!\n");
exit (1);
}
/* Everything seems to fit into an int, so continue adding. */
total += i;
}
return total;
}
#
# /etc/lcovrc - system-wide defaults for LCOV
#
# To change settings for a single user, place a customized copy of this file
# at location ~/.lcovrc
#
# Specify an external style sheet file (same as --css-file option of genhtml)
#genhtml_css_file = gcov.css
# Specify coverage rate limits (in %) for classifying file entries
# HI: hi_limit <= rate <= 100 graph color: green
# MED: med_limit <= rate < hi_limit graph color: orange
# LO: 0 <= rate < med_limit graph color: red
genhtml_hi_limit = 90
genhtml_med_limit = 75
# Width of line coverage field in source code view
genhtml_line_field_width = 12
# Width of branch coverage field in source code view
genhtml_branch_field_width = 16
# Width of overview image (used by --frames option of genhtml)
genhtml_overview_width = 80
# Resolution of overview navigation: this number specifies the maximum
# difference in lines between the position a user selected from the overview
# and the position the source code window is scrolled to (used by --frames
# option of genhtml)
genhtml_nav_resolution = 4
# Clicking a line in the overview image should show the source code view at
# a position a bit further up so that the requested line is not the first
# line in the window. This number specifies that offset in lines (used by
# --frames option of genhtml)
genhtml_nav_offset = 10
# Do not remove unused test descriptions if non-zero (same as
# --keep-descriptions option of genhtml)
genhtml_keep_descriptions = 0
# Do not remove prefix from directory names if non-zero (same as --no-prefix
# option of genhtml)
genhtml_no_prefix = 0
# Do not create source code view if non-zero (same as --no-source option of
# genhtml)
genhtml_no_source = 0
# Replace tabs with number of spaces in source view (same as --num-spaces
# option of genhtml)
genhtml_num_spaces = 8
# Highlight lines with converted-only data if non-zero (same as --highlight
# option of genhtml)
genhtml_highlight = 0
# Include color legend in HTML output if non-zero (same as --legend option of
# genhtml)
genhtml_legend = 0
# Use FILE as HTML prolog for generated pages (same as --html-prolog option of
# genhtml)
#genhtml_html_prolog = FILE
# Use FILE as HTML epilog for generated pages (same as --html-epilog option of
# genhtml)
#genhtml_html_epilog = FILE
# Use custom filename extension for pages (same as --html-extension option of
# genhtml)
#genhtml_html_extension = html
# Compress all generated html files with gzip.
#genhtml_html_gzip = 1
# Include sorted overview pages (can be disabled by the --no-sort option of
# genhtml)
genhtml_sort = 1
# Include function coverage data display (can be disabled by the
# --no-func-coverage option of genhtml)
#genhtml_function_coverage = 1
# Include branch coverage data display (can be disabled by the
# --no-branch-coverage option of genhtml)
#genhtml_branch_coverage = 1
# Specify the character set of all generated HTML pages
genhtml_charset=UTF-8
# Allow HTML markup in test case description text if non-zero
genhtml_desc_html=0
# Specify the precision for coverage rates
#genhtml_precision=1
# Show missed counts instead of hit counts
#genhtml_missed=1
# Demangle C++ symbols
#genhtml_demangle_cpp=1
# Location of the gcov tool (same as --gcov-info option of geninfo)
#geninfo_gcov_tool = gcov
# Adjust test names to include operating system information if non-zero
#geninfo_adjust_testname = 0
# Calculate checksum for each source code line if non-zero (same as --checksum
# option of geninfo if non-zero, same as --no-checksum if zero)
#geninfo_checksum = 1
# Specify whether to capture coverage data for external source files (can
# be overridden by the --external and --no-external options of geninfo/lcov)
#geninfo_external = 1
# Enable libtool compatibility mode if non-zero (same as --compat-libtool option
# of geninfo if non-zero, same as --no-compat-libtool if zero)
#geninfo_compat_libtool = 0
# Use gcov's --all-blocks option if non-zero
#geninfo_gcov_all_blocks = 1
# Specify compatiblity modes (same as --compat option of geninfo).
#geninfo_compat = libtool=on, hammer=auto, split_crc=auto
# Adjust path to source files by removing or changing path components that
# match the specified pattern (Perl regular expression format)
#geninfo_adjust_src_path = /tmp/build => /usr/src
# Specify if geninfo should try to automatically determine the base-directory
# when collecting coverage data.
geninfo_auto_base = 1
# Directory containing gcov kernel files
# lcov_gcov_dir = /proc/gcov
# Location of the insmod tool
lcov_insmod_tool = /sbin/insmod
# Location of the modprobe tool
lcov_modprobe_tool = /sbin/modprobe
# Location of the rmmod tool
lcov_rmmod_tool = /sbin/rmmod
# Location for temporary directories
lcov_tmp_dir = /tmp
# Show full paths during list operation if non-zero (same as --list-full-path
# option of lcov)
lcov_list_full_path = 0
# Specify the maximum width for list output. This value is ignored when
# lcov_list_full_path is non-zero.
lcov_list_width = 80
# Specify the maximum percentage of file names which may be truncated when
# choosing a directory prefix in list output. This value is ignored when
# lcov_list_full_path is non-zero.
lcov_list_truncate_max = 20
# Specify if function coverage data should be collected and processed.
lcov_function_coverage = 1
# Specify if branch coverage data should be collected and processed.
lcov_branch_coverage = 0
.TH gendesc 1 "LCOV 1.14" 2019\-02\-28 "User Manuals"
.SH NAME
gendesc \- Generate a test case description file
.SH SYNOPSIS
.B gendesc
.RB [ \-h | \-\-help ]
.RB [ \-v | \-\-version ]
.RS 8
.br
.RB [ \-o | \-\-output\-filename
.IR filename ]
.br
.I inputfile
.SH DESCRIPTION
Convert plain text test case descriptions into a format as understood by
.BR genhtml .
.I inputfile
needs to observe the following format:
For each test case:
.IP " \-"
one line containing the test case name beginning at the start of the line
.RE
.IP " \-"
one or more lines containing the test case description indented with at
least one whitespace character (tab or space)
.RE
.B Example input file:
test01
.RS
An example test case description.
.br
Description continued
.RE
test42
.RS
Supposedly the answer to most of your questions
.RE
Note: valid test names can consist of letters, decimal digits and the
underscore character ('_').
.SH OPTIONS
.B \-h
.br
.B \-\-help
.RS
Print a short help text, then exit.
.RE
.B \-v
.br
.B \-\-version
.RS
Print version number, then exit.
.RE
.BI "\-o " filename
.br
.BI "\-\-output\-filename " filename
.RS
Write description data to
.IR filename .
By default, output is written to STDOUT.
.RE
.SH AUTHOR
Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
.SH SEE ALSO
.BR lcov (1),
.BR genhtml (1),
.BR geninfo (1),
.BR genpng (1),
.BR gcov (1)